1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmClasses.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/nmethod.hpp"
30 #include "code/pcDesc.hpp"
31 #include "code/scopeDesc.hpp"
32 #include "code/vtableStubs.hpp"
33 #include "compiler/compilationMemoryStatistic.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/g1/g1HeapRegion.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "interpreter/bytecode.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/linkResolver.hpp"
43 #include "logging/log.hpp"
44 #include "logging/logStream.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/flatArrayKlass.hpp"
48 #include "oops/flatArrayOop.inline.hpp"
49 #include "oops/klass.inline.hpp"
50 #include "oops/objArrayKlass.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "oops/typeArrayOop.inline.hpp"
53 #include "opto/ad.hpp"
54 #include "opto/addnode.hpp"
55 #include "opto/callnode.hpp"
56 #include "opto/cfgnode.hpp"
57 #include "opto/graphKit.hpp"
58 #include "opto/machnode.hpp"
59 #include "opto/matcher.hpp"
60 #include "opto/memnode.hpp"
61 #include "opto/mulnode.hpp"
62 #include "opto/output.hpp"
63 #include "opto/runtime.hpp"
64 #include "opto/subnode.hpp"
65 #include "prims/jvmtiExport.hpp"
66 #include "runtime/atomicAccess.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/interfaceSupport.inline.hpp"
70 #include "runtime/javaCalls.hpp"
71 #include "runtime/sharedRuntime.hpp"
72 #include "runtime/signature.hpp"
73 #include "runtime/stackWatermarkSet.hpp"
74 #include "runtime/synchronizer.hpp"
75 #include "runtime/threadWXSetters.inline.hpp"
76 #include "runtime/vframe.hpp"
77 #include "runtime/vframe_hp.hpp"
78 #include "runtime/vframeArray.hpp"
79 #include "utilities/copy.hpp"
80 #include "utilities/preserveException.hpp"
81
82
83 // For debugging purposes:
84 // To force FullGCALot inside a runtime function, add the following two lines
85 //
86 // Universe::release_fullgc_alot_dummy();
87 // Universe::heap()->collect();
88 //
89 // At the command line, specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
90
91
92 #define C2_BLOB_FIELD_DEFINE(name, type) \
93 type* OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
94 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
95 #define C2_STUB_FIELD_DEFINE(name, f, t, r) \
96 address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
97 #define C2_JVMTI_STUB_FIELD_DEFINE(name) \
98 address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
99 C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
100 #undef C2_BLOB_FIELD_DEFINE
101 #undef C2_STUB_FIELD_DEFINE
102 #undef C2_JVMTI_STUB_FIELD_DEFINE
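// For illustration, assuming a hypothetical stub named "foo", the stub field
// definition above expands roughly to
//
//   address OptoRuntime::_foo_Java = nullptr;
//
// i.e. one statically initialized entry-point field per stub listed in
// C2_STUBS_DO.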
103
104 // This should be called in an assertion at the start of OptoRuntime routines
105 // which are entered from compiled code (all of them)
106 #ifdef ASSERT
107 static bool check_compiled_frame(JavaThread* thread) {
108 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
109 RegisterMap map(thread,
110 RegisterMap::UpdateMap::skip,
111 RegisterMap::ProcessFrames::include,
112 RegisterMap::WalkContinuation::skip);
113 frame caller = thread->last_frame().sender(&map);
114 assert(caller.is_compiled_frame(), "not being called from compiled-like code");
115 return true;
116 }
117 #endif // ASSERT
118
119 /*
120 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
121 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
122 if (var == nullptr) { return false; }
123 */
124
125 #define GEN_C2_BLOB(name, type) \
126 BLOB_FIELD_NAME(name) = \
127 generate_ ## name ## _blob(); \
128 if (BLOB_FIELD_NAME(name) == nullptr) { return false; }
129
130 // a few helper macros to conjure up generate_stub call arguments
131 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
132 #define C2_STUB_TYPEFUNC(name) name ## _Type
133 #define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
134 #define C2_STUB_ID(name) StubId:: JOIN3(c2, name, id)
135 #define C2_STUB_NAME(name) stub_name(C2_STUB_ID(name))
136
137 // Almost all the C functions targeted from the generated stubs are
138 // implemented locally to OptoRuntime with names that can be generated
139 // from the stub name by appending suffix '_C'. However, in two cases
140 // a common target method also needs to be called from shared runtime
141 // stubs. In these two cases the opto stubs rely on method
142 * implementations defined in class SharedRuntime. The following
143 // defines temporarily rebind the generated names to reference the
144 // relevant implementations.
145
146 #define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
147 C2_STUB_FIELD_NAME(name) = \
148 generate_stub(env, \
149 C2_STUB_TYPEFUNC(name), \
150 C2_STUB_C_FUNC(name), \
151 C2_STUB_NAME(name), \
152 C2_STUB_ID(name), \
153 fancy_jump, \
154 pass_tls, \
155 pass_retpc); \
156 if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \
157
158 #define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)
159
160 #define GEN_C2_JVMTI_STUB(name) \
161 STUB_FIELD_NAME(name) = \
162 generate_stub(env, \
163 notify_jvmti_vthread_Type, \
164 C2_JVMTI_STUB_C_FUNC(name), \
165 C2_STUB_NAME(name), \
166 C2_STUB_ID(name), \
167 0, \
168 true, \
169 false); \
170 if (STUB_FIELD_NAME(name) == nullptr) { return false; } \
171
172 bool OptoRuntime::generate(ciEnv* env) {
173
174 C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)
175
176 return true;
177 }
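// For illustration, a single GEN_C2_STUB(foo, fancy_jump, pass_tls, pass_retpc)
// entry in the C2_STUBS_DO expansion above unfolds roughly to
//
//   _foo_Java = generate_stub(env,
//                             foo_Type,
//                             CAST_FROM_FN_PTR(address, foo_C),
//                             stub_name(StubId::c2_foo_id),
//                             StubId::c2_foo_id,
//                             fancy_jump, pass_tls, pass_retpc);
//   if (_foo_Java == nullptr) { return false; }
//
// where "foo" is a hypothetical stub name and the exact StubId spelling
// depends on how JOIN3 pastes its arguments.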
178
179 #undef GEN_C2_BLOB
180
181 #undef C2_STUB_FIELD_NAME
182 #undef C2_STUB_TYPEFUNC
183 #undef C2_STUB_C_FUNC
184 #undef C2_STUB_NAME
185 #undef GEN_C2_STUB
186
187 #undef C2_JVMTI_STUB_C_FUNC
188 #undef GEN_C2_JVMTI_STUB
189 // #undef gen
190
191 const TypeFunc* OptoRuntime::_new_instance_Type = nullptr;
192 const TypeFunc* OptoRuntime::_new_array_Type = nullptr;
193 const TypeFunc* OptoRuntime::_new_array_nozero_Type = nullptr;
194 const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr;
195 const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr;
196 const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr;
197 const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr;
198 const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr;
199 const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr;
200 const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr;
201 const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr;
202 const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr;
203 const TypeFunc* OptoRuntime::_athrow_Type = nullptr;
204 const TypeFunc* OptoRuntime::_rethrow_Type = nullptr;
205 const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr;
206 const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr;
207 const TypeFunc* OptoRuntime::_modf_Type = nullptr;
208 const TypeFunc* OptoRuntime::_l2f_Type = nullptr;
209 const TypeFunc* OptoRuntime::_void_long_Type = nullptr;
210 const TypeFunc* OptoRuntime::_void_void_Type = nullptr;
211 const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr;
212 const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr;
213 const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr;
214 const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr;
215 const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr;
216 const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr;
217 const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr;
218 const TypeFunc* OptoRuntime::_array_fill_Type = nullptr;
219 const TypeFunc* OptoRuntime::_array_sort_Type = nullptr;
220 const TypeFunc* OptoRuntime::_array_partition_Type = nullptr;
221 const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr;
222 const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr;
223 const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr;
224 const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr;
225 const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr;
226 const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr;
227 const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr;
228 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr;
229 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
230 const TypeFunc* OptoRuntime::_double_keccak_Type = nullptr;
231 const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr;
232 const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr;
233 const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr;
234 const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr;
235 const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr;
236 const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr;
237 const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr;
238 const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr;
239 const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr;
240 const TypeFunc* OptoRuntime::_kyberNtt_Type = nullptr;
241 const TypeFunc* OptoRuntime::_kyberInverseNtt_Type = nullptr;
242 const TypeFunc* OptoRuntime::_kyberNttMult_Type = nullptr;
243 const TypeFunc* OptoRuntime::_kyberAddPoly_2_Type = nullptr;
244 const TypeFunc* OptoRuntime::_kyberAddPoly_3_Type = nullptr;
245 const TypeFunc* OptoRuntime::_kyber12To16_Type = nullptr;
246 const TypeFunc* OptoRuntime::_kyberBarrettReduce_Type = nullptr;
247 const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type = nullptr;
248 const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type = nullptr;
249 const TypeFunc* OptoRuntime::_dilithiumNttMult_Type = nullptr;
250 const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type = nullptr;
251 const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type = nullptr;
252 const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr;
253 const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr;
254 const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr;
255 const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr;
256 const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr;
257 const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr;
258 const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr;
259 const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr;
260 const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr;
261 const TypeFunc* OptoRuntime::_osr_end_Type = nullptr;
262 const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr;
263 #if INCLUDE_JFR
264 const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr;
265 #endif // INCLUDE_JFR
266 #if INCLUDE_JVMTI
267 const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type = nullptr;
268 #endif // INCLUDE_JVMTI
269 const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr;
270 const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr;
271
272 // Helper method to do generation of RuntimeStubs
273 address OptoRuntime::generate_stub(ciEnv* env,
274 TypeFunc_generator gen, address C_function,
275 const char *name, StubId stub_id,
276 int is_fancy_jump, bool pass_tls,
277 bool return_pc) {
278
279 // Matching the default directive, we currently have no method to match.
280 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
281 CompilationMemoryStatisticMark cmsm(directive);
282 ResourceMark rm;
283 Compile C(env, gen, C_function, name, stub_id, is_fancy_jump, pass_tls, return_pc, directive);
284 DirectivesStack::release(directive);
285 return C.stub_entry_point();
286 }
287
288 const char* OptoRuntime::stub_name(address entry) {
289 #ifndef PRODUCT
290 CodeBlob* cb = CodeCache::find_blob(entry);
291 RuntimeStub* rs = (RuntimeStub*)cb;
292 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
293 return rs->name();
294 #else
295 // Fast implementation for product mode (maybe it should be inlined too)
296 return "runtime stub";
297 #endif
298 }
299
300 // local methods passed as arguments to stub generator that forward
301 // control to corresponding JRT methods of SharedRuntime
302
303 void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
304 oopDesc* dest, jint dest_pos,
305 jint length, JavaThread* thread) {
306 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
307 }
308
309 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
310 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
311 }
312
313
314 //=============================================================================
315 // Opto compiler runtime routines
316 //=============================================================================
317
318
319 //=============================allocation======================================
320 // We failed the fast-path allocation. Now we need to do a scavenge or GC
321 // and try allocation again.
322
323 // object allocation
324 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, bool is_larval, JavaThread* current))
325 JRT_BLOCK;
326 #ifndef PRODUCT
327 SharedRuntime::_new_instance_ctr++; // new instance requires GC
328 #endif
329 assert(check_compiled_frame(current), "incorrect caller");
330
331 // These checks are cheap to make and support reflective allocation.
332 int lh = klass->layout_helper();
333 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
334 Handle holder(current, klass->klass_holder()); // keep the klass alive
335 klass->check_valid_for_instantiation(false, THREAD);
336 if (!HAS_PENDING_EXCEPTION) {
337 InstanceKlass::cast(klass)->initialize(THREAD);
338 }
339 }
340
341 if (!HAS_PENDING_EXCEPTION) {
342 // Scavenge and allocate an instance.
343 Handle holder(current, klass->klass_holder()); // keep the klass alive
344 instanceOop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
345 if (is_larval) {
346 // This is a larval buffer allocation: mark the new instance as larval
347 result->set_mark(result->mark().enter_larval_state());
348 }
349 current->set_vm_result_oop(result);
350
351 // Pass oops back through thread local storage. Our apparent type to Java
352 // is that we return an oop, but we can block on exit from this routine and
353 // a GC can trash the oop in C's return register. The generated stub will
354 // fetch the oop from TLS after any possible GC.
355 }
356
357 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
358 JRT_BLOCK_END;
359
360 // inform GC that we won't do card marks for initializing writes.
361 SharedRuntime::on_slowpath_allocation_exit(current);
362 JRT_END
363
364
365 // array allocation
366 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, oopDesc* init_val, JavaThread* current))
367 JRT_BLOCK;
368 #ifndef PRODUCT
369 SharedRuntime::_new_array_ctr++; // new array requires GC
370 #endif
371 assert(check_compiled_frame(current), "incorrect caller");
372
373 // Scavenge and allocate an instance.
374 oop result;
375 Handle h_init_val(current, init_val); // keep the init_val object alive
376
377 if (array_type->is_flatArray_klass()) {
378 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
379 FlatArrayKlass* fak = FlatArrayKlass::cast(array_type);
380 InlineKlass* vk = fak->element_klass();
381 ArrayKlass::ArrayProperties props = ArrayKlass::array_properties_from_layout(fak->layout_kind());
382 result = oopFactory::new_flatArray(vk, len, props, fak->layout_kind(), THREAD);
383 if (array_type->is_null_free_array_klass() && !h_init_val.is_null()) {
384 // Null-free arrays need to be initialized
385 for (int i = 0; i < len; i++) {
386 vk->write_value_to_addr(h_init_val(), ((flatArrayOop)result)->value_at_addr(i, fak->layout_helper()), fak->layout_kind(), true, CHECK);
387 }
388 }
389 } else if (array_type->is_typeArray_klass()) {
390 // The oopFactory likes to work with the element type.
391 // (We could bypass the oopFactory, since it doesn't add much value.)
392 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
393 result = oopFactory::new_typeArray(elem_type, len, THREAD);
394 } else {
395 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
396 result = oopFactory::new_refArray(array_type, len, THREAD);
397 if (array_type->is_null_free_array_klass() && !h_init_val.is_null()) {
398 // Null-free arrays need to be initialized
399 for (int i = 0; i < len; i++) {
400 ((objArrayOop)result)->obj_at_put(i, h_init_val());
401 }
402 }
403 }
404
405 // Pass oops back through thread local storage. Our apparent type to Java
406 // is that we return an oop, but we can block on exit from this routine and
407 // a GC can trash the oop in C's return register. The generated stub will
408 // fetch the oop from TLS after any possible GC.
409 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
410 current->set_vm_result_oop(result);
411 JRT_BLOCK_END;
412
413 // inform GC that we won't do card marks for initializing writes.
414 SharedRuntime::on_slowpath_allocation_exit(current);
415 JRT_END
416
417 // array allocation without zeroing
418 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
419 JRT_BLOCK;
420 #ifndef PRODUCT
421 SharedRuntime::_new_array_ctr++; // new array requires GC
422 #endif
423 assert(check_compiled_frame(current), "incorrect caller");
424
425 // Scavenge and allocate an instance.
426 oop result;
427
428 assert(array_type->is_typeArray_klass(), "should be called only for type array");
429 // The oopFactory likes to work with the element type.
430 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
431 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
432
433 // Pass oops back through thread local storage. Our apparent type to Java
434 // is that we return an oop, but we can block on exit from this routine and
435 // a GC can trash the oop in C's return register. The generated stub will
436 // fetch the oop from TLS after any possible GC.
437 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
438 current->set_vm_result_oop(result);
439 JRT_BLOCK_END;
440
441
442 // inform GC that we won't do card marks for initializing writes.
443 SharedRuntime::on_slowpath_allocation_exit(current);
444
445 oop result = current->vm_result_oop();
446 if ((len > 0) && (result != nullptr) &&
447 is_deoptimized_caller_frame(current)) {
448 // Zero array here if the caller is deoptimized.
449 const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
450 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
451 size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
452 assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
453 HeapWord* obj = cast_from_oop<HeapWord*>(result);
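// If the header ends on a 4-byte but not 8-byte boundary, clear the first
// 4 bytes of the payload separately so the bulk fill below can work on
// whole 8-byte words.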
454 if (!is_aligned(hs_bytes, BytesPerLong)) {
455 *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
456 hs_bytes += BytesPerInt;
457 }
458
459 // Optimized zeroing.
460 assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
461 const size_t aligned_hs = hs_bytes / BytesPerLong;
462 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
463 }
464
465 JRT_END
466
467 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
468
469 // multianewarray for 2 dimensions
470 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
471 #ifndef PRODUCT
472 SharedRuntime::_multi2_ctr++; // multianewarray for 2 dimensions
473 #endif
474 assert(check_compiled_frame(current), "incorrect caller");
475 assert(elem_type->is_klass(), "not a class");
476 jint dims[2];
477 dims[0] = len1;
478 dims[1] = len2;
479 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
480 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
481 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
482 current->set_vm_result_oop(obj);
483 JRT_END
484
485 // multianewarray for 3 dimensions
486 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
487 #ifndef PRODUCT
488 SharedRuntime::_multi3_ctr++; // multianewarray for 3 dimensions
489 #endif
490 assert(check_compiled_frame(current), "incorrect caller");
491 assert(elem_type->is_klass(), "not a class");
492 jint dims[3];
493 dims[0] = len1;
494 dims[1] = len2;
495 dims[2] = len3;
496 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
497 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
498 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
499 current->set_vm_result_oop(obj);
500 JRT_END
501
502 // multianewarray for 4 dimensions
503 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
504 #ifndef PRODUCT
505 SharedRuntime::_multi4_ctr++; // multianewarray for 4 dimensions
506 #endif
507 assert(check_compiled_frame(current), "incorrect caller");
508 assert(elem_type->is_klass(), "not a class");
509 jint dims[4];
510 dims[0] = len1;
511 dims[1] = len2;
512 dims[2] = len3;
513 dims[3] = len4;
514 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
515 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
516 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
517 current->set_vm_result_oop(obj);
518 JRT_END
519
520 // multianewarray for 5 dimensions
521 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
522 #ifndef PRODUCT
523 SharedRuntime::_multi5_ctr++; // multianewarray for 5 dimensions
524 #endif
525 assert(check_compiled_frame(current), "incorrect caller");
526 assert(elem_type->is_klass(), "not a class");
527 jint dims[5];
528 dims[0] = len1;
529 dims[1] = len2;
530 dims[2] = len3;
531 dims[3] = len4;
532 dims[4] = len5;
533 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
534 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
535 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
536 current->set_vm_result_oop(obj);
537 JRT_END
538
539 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
540 assert(check_compiled_frame(current), "incorrect caller");
541 assert(elem_type->is_klass(), "not a class");
542 assert(oop(dims)->is_typeArray(), "not an array");
543
544 ResourceMark rm;
545 jint len = dims->length();
546 assert(len > 0, "Dimensions array should contain data");
547 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
548 ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
549 c_dims, len);
550
551 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
552 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
553 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
554 current->set_vm_result_oop(obj);
555 JRT_END
556
557 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))
558
559 // Very few notify/notifyAll operations find any threads on the waitset, so
560 // the dominant fast-path is to simply return.
561 // Relatedly, it's critical that notify/notifyAll be fast in order to
562 // reduce lock hold times.
563 if (!SafepointSynchronize::is_synchronizing()) {
564 if (ObjectSynchronizer::quick_notify(obj, current, false)) {
565 return;
566 }
567 }
568
569 // This is the case the fast-path above isn't provisioned to handle.
570 // The fast-path is designed to handle frequently arising cases in an efficient manner.
571 // (The fast-path is just a degenerate variant of the slow-path).
572 // Perform the dreaded state transition and pass control into the slow-path.
573 JRT_BLOCK;
574 Handle h_obj(current, obj);
575 ObjectSynchronizer::notify(h_obj, CHECK);
576 JRT_BLOCK_END;
577 JRT_END
578
579 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
580
581 if (!SafepointSynchronize::is_synchronizing() ) {
582 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
583 return;
584 }
585 }
586
587 // This is the case the fast-path above isn't provisioned to handle.
588 // The fast-path is designed to handle frequently arising cases in an efficient manner.
589 // (The fast-path is just a degenerate variant of the slow-path).
590 // Perform the dreaded state transition and pass control into the slow-path.
591 JRT_BLOCK;
592 Handle h_obj(current, obj);
593 ObjectSynchronizer::notifyall(h_obj, CHECK);
594 JRT_BLOCK_END;
595 JRT_END
596
597 static const TypeFunc* make_new_instance_Type() {
598 // create input type (domain)
599 const Type **fields = TypeTuple::fields(2);
600 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
601 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // is_larval
602 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
603
604 // create result type (range)
605 fields = TypeTuple::fields(1);
606 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
607
608 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
609
610 return TypeFunc::make(domain, range);
611 }
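// A note on how the TypeFunc above maps onto new_instance_C: the domain lists
// only the arguments that compiled code passes explicitly (the Klass* and the
// is_larval flag); the JavaThread* parameter is supplied by the generated stub
// itself (pass_tls) rather than appearing in the domain. The raw-pointer
// result slot models the oop that the stub reloads from vm_result_oop() after
// the call, as described in new_instance_C above.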
612
613 #if INCLUDE_JVMTI
614 static const TypeFunc* make_notify_jvmti_vthread_Type() {
615 // create input type (domain)
616 const Type **fields = TypeTuple::fields(2);
617 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
618 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
619 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
620
621 // no result type needed
622 fields = TypeTuple::fields(1);
623 fields[TypeFunc::Parms+0] = nullptr; // void
624 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
625
626 return TypeFunc::make(domain,range);
627 }
628 #endif
629
630 static const TypeFunc* make_athrow_Type() {
631 // create input type (domain)
632 const Type **fields = TypeTuple::fields(1);
633 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop to be thrown
634 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
635
636 // create result type (range)
637 fields = TypeTuple::fields(0);
638
639 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
640
641 return TypeFunc::make(domain, range);
642 }
643
644 static const TypeFunc* make_new_array_Type() {
645 // create input type (domain)
646 const Type **fields = TypeTuple::fields(3);
647 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
648 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
649 fields[TypeFunc::Parms+2] = TypeInstPtr::NOTNULL; // init value
650 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
651
652 // create result type (range)
653 fields = TypeTuple::fields(1);
654 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
655
656 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
657
658 return TypeFunc::make(domain, range);
659 }
660
661 static const TypeFunc* make_new_array_nozero_Type() {
662 // create input type (domain)
663 const Type **fields = TypeTuple::fields(2);
664 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
665 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
666 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
667
668 // create result type (range)
669 fields = TypeTuple::fields(1);
670 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
671
672 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
673
674 return TypeFunc::make(domain, range);
675 }
676
677 const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
678 // create input type (domain)
679 const int nargs = ndim + 1;
680 const Type **fields = TypeTuple::fields(nargs);
681 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
682 for( int i = 1; i < nargs; i++ )
683 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
684 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
685
686 // create result type (range)
687 fields = TypeTuple::fields(1);
688 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
689 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
690
691 return TypeFunc::make(domain, range);
692 }
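// For example, multianewarray_Type(2) describes the signature used by
// multianewarray2_C above: (element klass, int len1, int len2), with the
// allocated oop again handed back through vm_result_oop().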
693
694 static const TypeFunc* make_multianewarrayN_Type() {
695 // create input type (domain)
696 const Type **fields = TypeTuple::fields(2);
697 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
698 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
699 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
700
701 // create result type (range)
702 fields = TypeTuple::fields(1);
703 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
704 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
705
706 return TypeFunc::make(domain, range);
707 }
708
709 static const TypeFunc* make_uncommon_trap_Type() {
710 // create input type (domain)
711 const Type **fields = TypeTuple::fields(1);
712 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
713 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
714
715 // create result type (range)
716 fields = TypeTuple::fields(0);
717 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
718
719 return TypeFunc::make(domain, range);
720 }
721
722 //-----------------------------------------------------------------------------
723 // Monitor Handling
724
725 static const TypeFunc* make_complete_monitor_enter_Type() {
726 // create input type (domain)
727 const Type **fields = TypeTuple::fields(2);
728 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
729 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
730 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
731
732 // create result type (range)
733 fields = TypeTuple::fields(0);
734
735 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
736
737 return TypeFunc::make(domain, range);
738 }
739
740 //-----------------------------------------------------------------------------
741
742 static const TypeFunc* make_complete_monitor_exit_Type() {
743 // create input type (domain)
744 const Type **fields = TypeTuple::fields(3);
745 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
746 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
747 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
748 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
749
750 // create result type (range)
751 fields = TypeTuple::fields(0);
752
753 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
754
755 return TypeFunc::make(domain, range);
756 }
757
758 static const TypeFunc* make_monitor_notify_Type() {
759 // create input type (domain)
760 const Type **fields = TypeTuple::fields(1);
761 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
762 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
763
764 // create result type (range)
765 fields = TypeTuple::fields(0);
766 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
767 return TypeFunc::make(domain, range);
768 }
769
770 static const TypeFunc* make_flush_windows_Type() {
771 // create input type (domain)
772 const Type** fields = TypeTuple::fields(1);
773 fields[TypeFunc::Parms+0] = nullptr; // void
774 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
775
776 // create result type
777 fields = TypeTuple::fields(1);
778 fields[TypeFunc::Parms+0] = nullptr; // void
779 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
780
781 return TypeFunc::make(domain, range);
782 }
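// Note on the Type::HALF entries used below: in C2 a 64-bit value (jlong or
// jdouble) occupies two adjacent slots, the second of which is typed
// Type::HALF. A single long argument therefore contributes TypeLong::LONG
// plus Type::HALF to the domain (see make_l2f_Type below), and 64-bit
// results follow the same two-slot convention.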
783
784 static const TypeFunc* make_l2f_Type() {
785 // create input type (domain)
786 const Type **fields = TypeTuple::fields(2);
787 fields[TypeFunc::Parms+0] = TypeLong::LONG;
788 fields[TypeFunc::Parms+1] = Type::HALF;
789 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
790
791 // create result type (range)
792 fields = TypeTuple::fields(1);
793 fields[TypeFunc::Parms+0] = Type::FLOAT;
794 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
795
796 return TypeFunc::make(domain, range);
797 }
798
799 static const TypeFunc* make_modf_Type() {
800 const Type **fields = TypeTuple::fields(2);
801 fields[TypeFunc::Parms+0] = Type::FLOAT;
802 fields[TypeFunc::Parms+1] = Type::FLOAT;
803 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
804
805 // create result type (range)
806 fields = TypeTuple::fields(1);
807 fields[TypeFunc::Parms+0] = Type::FLOAT;
808
809 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
810
811 return TypeFunc::make(domain, range);
812 }
813
814 static const TypeFunc* make_Math_D_D_Type() {
815 // create input type (domain)
816 const Type **fields = TypeTuple::fields(2);
818 fields[TypeFunc::Parms+0] = Type::DOUBLE;
819 fields[TypeFunc::Parms+1] = Type::HALF;
820 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
821
822 // create result type (range)
823 fields = TypeTuple::fields(2);
824 fields[TypeFunc::Parms+0] = Type::DOUBLE;
825 fields[TypeFunc::Parms+1] = Type::HALF;
826 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
827
828 return TypeFunc::make(domain, range);
829 }
830
831 const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
832 // create input type (domain)
833 const Type **fields = TypeTuple::fields(num_arg);
835 assert(num_arg > 0, "must have at least 1 input");
836 for (uint i = 0; i < num_arg; i++) {
837 fields[TypeFunc::Parms+i] = in_type;
838 }
839 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);
840
841 // create result type (range)
842 const uint num_ret = 1;
843 fields = TypeTuple::fields(num_ret);
844 fields[TypeFunc::Parms+0] = out_type;
845 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);
846
847 return TypeFunc::make(domain, range);
848 }
849
850 static const TypeFunc* make_Math_DD_D_Type() {
851 const Type **fields = TypeTuple::fields(4);
852 fields[TypeFunc::Parms+0] = Type::DOUBLE;
853 fields[TypeFunc::Parms+1] = Type::HALF;
854 fields[TypeFunc::Parms+2] = Type::DOUBLE;
855 fields[TypeFunc::Parms+3] = Type::HALF;
856 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
857
858 // create result type (range)
859 fields = TypeTuple::fields(2);
860 fields[TypeFunc::Parms+0] = Type::DOUBLE;
861 fields[TypeFunc::Parms+1] = Type::HALF;
862 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
863
864 return TypeFunc::make(domain, range);
865 }
866
867 //-------------- currentTimeMillis, currentTimeNanos, etc
868
869 static const TypeFunc* make_void_long_Type() {
870 // create input type (domain)
871 const Type **fields = TypeTuple::fields(0);
872 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
873
874 // create result type (range)
875 fields = TypeTuple::fields(2);
876 fields[TypeFunc::Parms+0] = TypeLong::LONG;
877 fields[TypeFunc::Parms+1] = Type::HALF;
878 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
879
880 return TypeFunc::make(domain, range);
881 }
882
883 static const TypeFunc* make_void_void_Type() {
884 // create input type (domain)
885 const Type **fields = TypeTuple::fields(0);
886 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
887
888 // create result type (range)
889 fields = TypeTuple::fields(0);
890 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
891 return TypeFunc::make(domain, range);
892 }
893
894 static const TypeFunc* make_jfr_write_checkpoint_Type() {
895 // create input type (domain)
896 const Type **fields = TypeTuple::fields(0);
897 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
898
899 // create result type (range)
900 fields = TypeTuple::fields(1);
901 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
902 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 1, fields);
903 return TypeFunc::make(domain, range);
904 }
905
906
907 // Takes as parameters:
908 // void *dest
909 // long size
910 // uchar byte
911
912 static const TypeFunc* make_setmemory_Type() {
913 // create input type (domain)
914 int argcnt = NOT_LP64(3) LP64_ONLY(4);
915 const Type** fields = TypeTuple::fields(argcnt);
916 int argp = TypeFunc::Parms;
917 fields[argp++] = TypePtr::NOTNULL; // dest
918 fields[argp++] = TypeX_X; // size
919 LP64_ONLY(fields[argp++] = Type::HALF); // size
920 fields[argp++] = TypeInt::UBYTE; // bytevalue
921 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
922 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
923
924 // no result type needed
925 fields = TypeTuple::fields(1);
926 fields[TypeFunc::Parms+0] = nullptr; // void
927 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
928 return TypeFunc::make(domain, range);
929 }
930
931 // arraycopy stub variations:
932 enum ArrayCopyType {
933 ac_fast, // void(ptr, ptr, size_t)
934 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
935 ac_slow, // void(ptr, int, ptr, int, int)
936 ac_generic // int(ptr, int, ptr, int, int)
937 };
938
939 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
940 // create input type (domain)
941 int num_args = (act == ac_fast ? 3 : 5);
942 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
943 int argcnt = num_args;
944 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
945 const Type** fields = TypeTuple::fields(argcnt);
946 int argp = TypeFunc::Parms;
947 fields[argp++] = TypePtr::NOTNULL; // src
948 if (num_size_args == 0) {
949 fields[argp++] = TypeInt::INT; // src_pos
950 }
951 fields[argp++] = TypePtr::NOTNULL; // dest
952 if (num_size_args == 0) {
953 fields[argp++] = TypeInt::INT; // dest_pos
954 fields[argp++] = TypeInt::INT; // length
955 }
956 while (num_size_args-- > 0) {
957 fields[argp++] = TypeX_X; // size in whatevers (size_t)
958 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
959 }
960 if (act == ac_checkcast) {
961 fields[argp++] = TypePtr::NOTNULL; // super_klass
962 }
963 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
964 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
965
966 // create result type if needed
967 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
968 fields = TypeTuple::fields(1);
969 if (retcnt == 0)
970 fields[TypeFunc::Parms+0] = nullptr; // void
971 else
972 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
973 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
974 return TypeFunc::make(domain, range);
975 }
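// As a concrete example, ac_checkcast on an LP64 build yields a domain of
// 7 slots: src ptr, dest ptr, two size_t lengths (each TypeX_X plus a HALF
// slot), and the super_klass ptr, with a single int status result. This
// matches the int(ptr, ptr, size_t, size_t, ptr) shape noted in the enum
// above.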
976
977 static const TypeFunc* make_array_fill_Type() {
978 const Type** fields;
979 int argp = TypeFunc::Parms;
980 // create input type (domain): pointer, int, size_t
981 fields = TypeTuple::fields(3 LP64_ONLY( + 1));
982 fields[argp++] = TypePtr::NOTNULL;
983 fields[argp++] = TypeInt::INT;
984 fields[argp++] = TypeX_X; // size in whatevers (size_t)
985 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
986 const TypeTuple *domain = TypeTuple::make(argp, fields);
987
988 // create result type
989 fields = TypeTuple::fields(1);
990 fields[TypeFunc::Parms+0] = nullptr; // void
991 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
992
993 return TypeFunc::make(domain, range);
994 }
995
996 static const TypeFunc* make_array_partition_Type() {
997 // create input type (domain)
998 int num_args = 7;
999 int argcnt = num_args;
1000 const Type** fields = TypeTuple::fields(argcnt);
1001 int argp = TypeFunc::Parms;
1002 fields[argp++] = TypePtr::NOTNULL; // array
1003 fields[argp++] = TypeInt::INT; // element type
1004 fields[argp++] = TypeInt::INT; // low
1005 fields[argp++] = TypeInt::INT; // end
1006 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array)
1007 fields[argp++] = TypeInt::INT; // indexPivot1
1008 fields[argp++] = TypeInt::INT; // indexPivot2
1009 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1010 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1011
1012 // no result type needed
1013 fields = TypeTuple::fields(1);
1014 fields[TypeFunc::Parms+0] = nullptr; // void
1015 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1016 return TypeFunc::make(domain, range);
1017 }
1018
1019 static const TypeFunc* make_array_sort_Type() {
1020 // create input type (domain)
1021 int num_args = 4;
1022 int argcnt = num_args;
1023 const Type** fields = TypeTuple::fields(argcnt);
1024 int argp = TypeFunc::Parms;
1025 fields[argp++] = TypePtr::NOTNULL; // array
1026 fields[argp++] = TypeInt::INT; // element type
1027 fields[argp++] = TypeInt::INT; // fromIndex
1028 fields[argp++] = TypeInt::INT; // toIndex
1029 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1030 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1031
1032 // no result type needed
1033 fields = TypeTuple::fields(1);
1034 fields[TypeFunc::Parms+0] = nullptr; // void
1035 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1036 return TypeFunc::make(domain, range);
1037 }
1038
1039 static const TypeFunc* make_aescrypt_block_Type() {
1040 // create input type (domain)
1041 int num_args = 3;
1042 int argcnt = num_args;
1043 const Type** fields = TypeTuple::fields(argcnt);
1044 int argp = TypeFunc::Parms;
1045 fields[argp++] = TypePtr::NOTNULL; // src
1046 fields[argp++] = TypePtr::NOTNULL; // dest
1047 fields[argp++] = TypePtr::NOTNULL; // k array
1048 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1049 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1050
1051 // no result type needed
1052 fields = TypeTuple::fields(1);
1053 fields[TypeFunc::Parms+0] = nullptr; // void
1054 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1055 return TypeFunc::make(domain, range);
1056 }
1057
1058 static const TypeFunc* make_updateBytesCRC32_Type() {
1059 // create input type (domain)
1060 int num_args = 3;
1061 int argcnt = num_args;
1062 const Type** fields = TypeTuple::fields(argcnt);
1063 int argp = TypeFunc::Parms;
1064 fields[argp++] = TypeInt::INT; // crc
1065 fields[argp++] = TypePtr::NOTNULL; // src
1066 fields[argp++] = TypeInt::INT; // len
1067 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1068 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1069
1070 // result type needed
1071 fields = TypeTuple::fields(1);
1072 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1073 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1074 return TypeFunc::make(domain, range);
1075 }
1076
1077 static const TypeFunc* make_updateBytesCRC32C_Type() {
1078 // create input type (domain)
1079 int num_args = 4;
1080 int argcnt = num_args;
1081 const Type** fields = TypeTuple::fields(argcnt);
1082 int argp = TypeFunc::Parms;
1083 fields[argp++] = TypeInt::INT; // crc
1084 fields[argp++] = TypePtr::NOTNULL; // buf
1085 fields[argp++] = TypeInt::INT; // len
1086 fields[argp++] = TypePtr::NOTNULL; // table
1087 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1088 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1089
1090 // result type needed
1091 fields = TypeTuple::fields(1);
1092 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1093 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1094 return TypeFunc::make(domain, range);
1095 }
1096
1097 static const TypeFunc* make_updateBytesAdler32_Type() {
1098 // create input type (domain)
1099 int num_args = 3;
1100 int argcnt = num_args;
1101 const Type** fields = TypeTuple::fields(argcnt);
1102 int argp = TypeFunc::Parms;
1103 fields[argp++] = TypeInt::INT; // adler
1104 fields[argp++] = TypePtr::NOTNULL; // src + offset
1105 fields[argp++] = TypeInt::INT; // len
1106 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1107 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1108
1109 // result type needed
1110 fields = TypeTuple::fields(1);
1111 fields[TypeFunc::Parms+0] = TypeInt::INT; // adler result
1112 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1113 return TypeFunc::make(domain, range);
1114 }
1115
1116 static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
1117 // create input type (domain)
1118 int num_args = 5;
1119 int argcnt = num_args;
1120 const Type** fields = TypeTuple::fields(argcnt);
1121 int argp = TypeFunc::Parms;
1122 fields[argp++] = TypePtr::NOTNULL; // src
1123 fields[argp++] = TypePtr::NOTNULL; // dest
1124 fields[argp++] = TypePtr::NOTNULL; // k array
1125 fields[argp++] = TypePtr::NOTNULL; // r array
1126 fields[argp++] = TypeInt::INT; // src len
1127 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1128 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1129
1130 // returning cipher len (int)
1131 fields = TypeTuple::fields(1);
1132 fields[TypeFunc::Parms+0] = TypeInt::INT;
1133 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1134 return TypeFunc::make(domain, range);
1135 }
1136
1137 static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
1138 // create input type (domain)
1139 int num_args = 4;
1140 int argcnt = num_args;
1141 const Type** fields = TypeTuple::fields(argcnt);
1142 int argp = TypeFunc::Parms;
1143 fields[argp++] = TypePtr::NOTNULL; // src
1144 fields[argp++] = TypePtr::NOTNULL; // dest
1145 fields[argp++] = TypePtr::NOTNULL; // k array
1146 fields[argp++] = TypeInt::INT; // src len
1147 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1148 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1149
1150 // returning cipher len (int)
1151 fields = TypeTuple::fields(1);
1152 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1153 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1154 return TypeFunc::make(domain, range);
1155 }
1156
1157 static const TypeFunc* make_counterMode_aescrypt_Type() {
1158 // create input type (domain)
1159 int num_args = 7;
1160 int argcnt = num_args;
1161 const Type** fields = TypeTuple::fields(argcnt);
1162 int argp = TypeFunc::Parms;
1163 fields[argp++] = TypePtr::NOTNULL; // src
1164 fields[argp++] = TypePtr::NOTNULL; // dest
1165 fields[argp++] = TypePtr::NOTNULL; // k array
1166 fields[argp++] = TypePtr::NOTNULL; // counter array
1167 fields[argp++] = TypeInt::INT; // src len
1168 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
1169 fields[argp++] = TypePtr::NOTNULL; // saved used addr
1170 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1171 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1172 // returning cipher len (int)
1173 fields = TypeTuple::fields(1);
1174 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1175 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1176 return TypeFunc::make(domain, range);
1177 }
1178
1179 static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
1180 // create input type (domain)
1181 int num_args = 8;
1182 int argcnt = num_args;
1183 const Type** fields = TypeTuple::fields(argcnt);
1184 int argp = TypeFunc::Parms;
1185 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs
1186 fields[argp++] = TypeInt::INT; // int len
1187 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs
1188 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs
1189 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj
1190 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj
1191 fields[argp++] = TypePtr::NOTNULL; // long[] subkeyHtbl from GHASH obj
1192 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj
1193
1194 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1195 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1196 // returning cipher len (int)
1197 fields = TypeTuple::fields(1);
1198 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1199 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1200 return TypeFunc::make(domain, range);
1201 }
1202
1203 static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
1204 // create input type (domain)
1205 int num_args = is_sha3 ? 3 : 2;
1206 int argcnt = num_args;
1207 const Type** fields = TypeTuple::fields(argcnt);
1208 int argp = TypeFunc::Parms;
1209 fields[argp++] = TypePtr::NOTNULL; // buf
1210 fields[argp++] = TypePtr::NOTNULL; // state
1211 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1212 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1213 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1214
1215 // no result type needed
1216 fields = TypeTuple::fields(1);
1217 fields[TypeFunc::Parms+0] = nullptr; // void
1218 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1219 return TypeFunc::make(domain, range);
1220 }
1221
1222 /*
1223 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
1224 */
1225 static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
1226 // create input type (domain)
1227 int num_args = is_sha3 ? 5 : 4;
1228 int argcnt = num_args;
1229 const Type** fields = TypeTuple::fields(argcnt);
1230 int argp = TypeFunc::Parms;
1231 fields[argp++] = TypePtr::NOTNULL; // buf
1232 fields[argp++] = TypePtr::NOTNULL; // state
1233 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1234 fields[argp++] = TypeInt::INT; // ofs
1235 fields[argp++] = TypeInt::INT; // limit
1236 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1237 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1238
1239 // returning ofs (int)
1240 fields = TypeTuple::fields(1);
1241 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1242 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1243 return TypeFunc::make(domain, range);
1244 }
1245
1246 // SHAKE128Parallel doubleKeccak function
1247 static const TypeFunc* make_double_keccak_Type() {
1248 int argcnt = 2;
1249
1250 const Type** fields = TypeTuple::fields(argcnt);
1251 int argp = TypeFunc::Parms;
1252 fields[argp++] = TypePtr::NOTNULL; // status0
1253 fields[argp++] = TypePtr::NOTNULL; // status1
1254
1255 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1256 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1257
1258 // result type needed
1259 fields = TypeTuple::fields(1);
1260 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1261 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1262 return TypeFunc::make(domain, range);
1263 }
1264
1265 static const TypeFunc* make_multiplyToLen_Type() {
1266 // create input type (domain)
1267 int num_args = 5;
1268 int argcnt = num_args;
1269 const Type** fields = TypeTuple::fields(argcnt);
1270 int argp = TypeFunc::Parms;
1271 fields[argp++] = TypePtr::NOTNULL; // x
1272 fields[argp++] = TypeInt::INT; // xlen
1273 fields[argp++] = TypePtr::NOTNULL; // y
1274 fields[argp++] = TypeInt::INT; // ylen
1275 fields[argp++] = TypePtr::NOTNULL; // z
1276 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1277 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1278
1279 // no result type needed
1280 fields = TypeTuple::fields(1);
1281 fields[TypeFunc::Parms+0] = nullptr;
1282 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1283 return TypeFunc::make(domain, range);
1284 }
1285
1286 static const TypeFunc* make_squareToLen_Type() {
1287 // create input type (domain)
1288 int num_args = 4;
1289 int argcnt = num_args;
1290 const Type** fields = TypeTuple::fields(argcnt);
1291 int argp = TypeFunc::Parms;
1292 fields[argp++] = TypePtr::NOTNULL; // x
1293 fields[argp++] = TypeInt::INT; // len
1294 fields[argp++] = TypePtr::NOTNULL; // z
1295 fields[argp++] = TypeInt::INT; // zlen
1296 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1297 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1298
1299 // no result type needed
1300 fields = TypeTuple::fields(1);
1301 fields[TypeFunc::Parms+0] = nullptr;
1302 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1303 return TypeFunc::make(domain, range);
1304 }
1305
1306 static const TypeFunc* make_mulAdd_Type() {
1307 // create input type (domain)
1308 int num_args = 5;
1309 int argcnt = num_args;
1310 const Type** fields = TypeTuple::fields(argcnt);
1311 int argp = TypeFunc::Parms;
1312 fields[argp++] = TypePtr::NOTNULL; // out
1313 fields[argp++] = TypePtr::NOTNULL; // in
1314 fields[argp++] = TypeInt::INT; // offset
1315 fields[argp++] = TypeInt::INT; // len
1316 fields[argp++] = TypeInt::INT; // k
1317 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1318 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1319
1320 // returning carry (int)
1321 fields = TypeTuple::fields(1);
1322 fields[TypeFunc::Parms+0] = TypeInt::INT;
1323 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1324 return TypeFunc::make(domain, range);
1325 }
1326
1327 static const TypeFunc* make_montgomeryMultiply_Type() {
1328 // create input type (domain)
1329 int num_args = 7;
1330 int argcnt = num_args;
1331 const Type** fields = TypeTuple::fields(argcnt);
1332 int argp = TypeFunc::Parms;
1333 fields[argp++] = TypePtr::NOTNULL; // a
1334 fields[argp++] = TypePtr::NOTNULL; // b
1335 fields[argp++] = TypePtr::NOTNULL; // n
1336 fields[argp++] = TypeInt::INT; // len
1337 fields[argp++] = TypeLong::LONG; // inv
1338 fields[argp++] = Type::HALF;
1339 fields[argp++] = TypePtr::NOTNULL; // result
1340 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1341 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1342
1343 // result type needed
1344 fields = TypeTuple::fields(1);
1345 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1346
1347 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1348 return TypeFunc::make(domain, range);
1349 }
1350
1351 static const TypeFunc* make_montgomerySquare_Type() {
1352 // create input type (domain)
1353 int num_args = 6;
1354 int argcnt = num_args;
1355 const Type** fields = TypeTuple::fields(argcnt);
1356 int argp = TypeFunc::Parms;
1357 fields[argp++] = TypePtr::NOTNULL; // a
1358 fields[argp++] = TypePtr::NOTNULL; // n
1359 fields[argp++] = TypeInt::INT; // len
1360 fields[argp++] = TypeLong::LONG; // inv
1361 fields[argp++] = Type::HALF;
1362 fields[argp++] = TypePtr::NOTNULL; // result
1363 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1364 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1365
1366 // result type needed
1367 fields = TypeTuple::fields(1);
1368 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1369
1370 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1371 return TypeFunc::make(domain, range);
1372 }
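// In both montgomery TypeFuncs above, "len" counts 32-bit words and "inv" is the 64-bit
// value carried by the LONG/HALF pair; the product is written into the caller-supplied
// result array rather than being returned in a register (the range is built empty).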
1373
1374 static const TypeFunc* make_bigIntegerShift_Type() {
1375 int argcnt = 5;
1376 const Type** fields = TypeTuple::fields(argcnt);
1377 int argp = TypeFunc::Parms;
1378 fields[argp++] = TypePtr::NOTNULL; // newArr
1379 fields[argp++] = TypePtr::NOTNULL; // oldArr
1380 fields[argp++] = TypeInt::INT; // newIdx
1381 fields[argp++] = TypeInt::INT; // shiftCount
1382 fields[argp++] = TypeInt::INT; // numIter
1383 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1384 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1385
1386 // no result type needed
1387 fields = TypeTuple::fields(1);
1388 fields[TypeFunc::Parms + 0] = nullptr;
1389 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1390 return TypeFunc::make(domain, range);
1391 }
1392
1393 static const TypeFunc* make_vectorizedMismatch_Type() {
1394 // create input type (domain)
1395 int num_args = 4;
1396 int argcnt = num_args;
1397 const Type** fields = TypeTuple::fields(argcnt);
1398 int argp = TypeFunc::Parms;
1399 fields[argp++] = TypePtr::NOTNULL; // obja
1400 fields[argp++] = TypePtr::NOTNULL; // objb
1401   fields[argp++] = TypeInt::INT;        // length, number of elements to compare
1402   fields[argp++] = TypeInt::INT;        // log2scale, log2 of the element size in bytes
1403 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1404 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1405
1406 //return mismatch index (int)
1407 fields = TypeTuple::fields(1);
1408 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1409 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1410 return TypeFunc::make(domain, range);
1411 }
1412
1413 static const TypeFunc* make_ghash_processBlocks_Type() {
1414 int argcnt = 4;
1415
1416 const Type** fields = TypeTuple::fields(argcnt);
1417 int argp = TypeFunc::Parms;
1418 fields[argp++] = TypePtr::NOTNULL; // state
1419 fields[argp++] = TypePtr::NOTNULL; // subkeyH
1420 fields[argp++] = TypePtr::NOTNULL; // data
1421 fields[argp++] = TypeInt::INT; // blocks
1422 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1423 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1424
1425   // no result type needed
1426 fields = TypeTuple::fields(1);
1427 fields[TypeFunc::Parms+0] = nullptr; // void
1428 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1429 return TypeFunc::make(domain, range);
1430 }
1431
1432 static const TypeFunc* make_chacha20Block_Type() {
1433 int argcnt = 2;
1434
1435 const Type** fields = TypeTuple::fields(argcnt);
1436 int argp = TypeFunc::Parms;
1437 fields[argp++] = TypePtr::NOTNULL; // state
1438 fields[argp++] = TypePtr::NOTNULL; // result
1439
1440 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1441 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1442
1443 // result type needed
1444 fields = TypeTuple::fields(1);
1445 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
1446 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1447 return TypeFunc::make(domain, range);
1448 }
1449
1450 // Kyber NTT function
1451 static const TypeFunc* make_kyberNtt_Type() {
1452 int argcnt = 2;
1453
1454 const Type** fields = TypeTuple::fields(argcnt);
1455 int argp = TypeFunc::Parms;
1456 fields[argp++] = TypePtr::NOTNULL; // coeffs
1457 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1458
1459 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1460 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1461
1462 // result type needed
1463 fields = TypeTuple::fields(1);
1464 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1465 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1466 return TypeFunc::make(domain, range);
1467 }
1468
1469 // Kyber inverse NTT function
1470 static const TypeFunc* make_kyberInverseNtt_Type() {
1471 int argcnt = 2;
1472
1473 const Type** fields = TypeTuple::fields(argcnt);
1474 int argp = TypeFunc::Parms;
1475 fields[argp++] = TypePtr::NOTNULL; // coeffs
1476 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1477
1478 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1479 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1480
1481 // result type needed
1482 fields = TypeTuple::fields(1);
1483 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1484 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1485 return TypeFunc::make(domain, range);
1486 }
1487
1488 // Kyber NTT multiply function
1489 static const TypeFunc* make_kyberNttMult_Type() {
1490 int argcnt = 4;
1491
1492 const Type** fields = TypeTuple::fields(argcnt);
1493 int argp = TypeFunc::Parms;
1494 fields[argp++] = TypePtr::NOTNULL; // result
1495 fields[argp++] = TypePtr::NOTNULL; // ntta
1496 fields[argp++] = TypePtr::NOTNULL; // nttb
1497 fields[argp++] = TypePtr::NOTNULL; // NTT multiply zetas
1498
1499 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1500 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1501
1502 // result type needed
1503 fields = TypeTuple::fields(1);
1504 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1505 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1506 return TypeFunc::make(domain, range);
1507 }
1508
1509 // Kyber add 2 polynomials function
1510 static const TypeFunc* make_kyberAddPoly_2_Type() {
1511 int argcnt = 3;
1512
1513 const Type** fields = TypeTuple::fields(argcnt);
1514 int argp = TypeFunc::Parms;
1515 fields[argp++] = TypePtr::NOTNULL; // result
1516 fields[argp++] = TypePtr::NOTNULL; // a
1517 fields[argp++] = TypePtr::NOTNULL; // b
1518
1519 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1520 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1521
1522 // result type needed
1523 fields = TypeTuple::fields(1);
1524 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1525 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1526 return TypeFunc::make(domain, range);
1527 }
1528
1529
1530 // Kyber add 3 polynomials function
1531 static const TypeFunc* make_kyberAddPoly_3_Type() {
1532 int argcnt = 4;
1533
1534 const Type** fields = TypeTuple::fields(argcnt);
1535 int argp = TypeFunc::Parms;
1536 fields[argp++] = TypePtr::NOTNULL; // result
1537 fields[argp++] = TypePtr::NOTNULL; // a
1538 fields[argp++] = TypePtr::NOTNULL; // b
1539 fields[argp++] = TypePtr::NOTNULL; // c
1540
1541 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1542 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1543
1544 // result type needed
1545 fields = TypeTuple::fields(1);
1546 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1547 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1548 return TypeFunc::make(domain, range);
1549 }
1550
1551
1552 // Kyber XOF output parsing into polynomial coefficient candidates,
1553 // or the decompress(12,...) function
1554 static const TypeFunc* make_kyber12To16_Type() {
1555 int argcnt = 4;
1556
1557 const Type** fields = TypeTuple::fields(argcnt);
1558 int argp = TypeFunc::Parms;
1559 fields[argp++] = TypePtr::NOTNULL; // condensed
1560 fields[argp++] = TypeInt::INT; // condensedOffs
1561 fields[argp++] = TypePtr::NOTNULL; // parsed
1562 fields[argp++] = TypeInt::INT; // parsedLength
1563
1564 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1565 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1566
1567 // result type needed
1568 fields = TypeTuple::fields(1);
1569 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1570 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1571 return TypeFunc::make(domain, range);
1572 }
1573
1574 // Kyber Barrett reduce function
1575 static const TypeFunc* make_kyberBarrettReduce_Type() {
1576 int argcnt = 1;
1577
1578 const Type** fields = TypeTuple::fields(argcnt);
1579 int argp = TypeFunc::Parms;
1580 fields[argp++] = TypePtr::NOTNULL; // coeffs
1581
1582 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1583 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1584
1585 // result type needed
1586 fields = TypeTuple::fields(1);
1587 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1588 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1589 return TypeFunc::make(domain, range);
1590 }
1591
1592 // Dilithium NTT function except for the final "normalization" to |coeff| < Q
1593 static const TypeFunc* make_dilithiumAlmostNtt_Type() {
1594 int argcnt = 2;
1595
1596 const Type** fields = TypeTuple::fields(argcnt);
1597 int argp = TypeFunc::Parms;
1598 fields[argp++] = TypePtr::NOTNULL; // coeffs
1599 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1600
1601 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1602 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1603
1604 // result type needed
1605 fields = TypeTuple::fields(1);
1606 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1607 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1608 return TypeFunc::make(domain, range);
1609 }
1610
1611 // Dilithium inverse NTT function, except for the final division by 2^256 (mod Q)
1612 static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() {
1613 int argcnt = 2;
1614
1615 const Type** fields = TypeTuple::fields(argcnt);
1616 int argp = TypeFunc::Parms;
1617 fields[argp++] = TypePtr::NOTNULL; // coeffs
1618 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1619
1620 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1621 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1622
1623 // result type needed
1624 fields = TypeTuple::fields(1);
1625 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1626 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1627 return TypeFunc::make(domain, range);
1628 }
1629
1630 // Dilithium NTT multiply function
1631 static const TypeFunc* make_dilithiumNttMult_Type() {
1632 int argcnt = 3;
1633
1634 const Type** fields = TypeTuple::fields(argcnt);
1635 int argp = TypeFunc::Parms;
1636 fields[argp++] = TypePtr::NOTNULL; // result
1637 fields[argp++] = TypePtr::NOTNULL; // ntta
1638 fields[argp++] = TypePtr::NOTNULL; // nttb
1639
1640 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1641 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1642
1643 // result type needed
1644 fields = TypeTuple::fields(1);
1645 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1646 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1647 return TypeFunc::make(domain, range);
1648 }
1649
1650 // Dilithium Montgomery-multiply a polynomial coefficient array by a constant
1651 static const TypeFunc* make_dilithiumMontMulByConstant_Type() {
1652 int argcnt = 2;
1653
1654 const Type** fields = TypeTuple::fields(argcnt);
1655 int argp = TypeFunc::Parms;
1656 fields[argp++] = TypePtr::NOTNULL; // coeffs
1657 fields[argp++] = TypeInt::INT; // constant multiplier
1658
1659 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1660 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1661
1662 // result type needed
1663 fields = TypeTuple::fields(1);
1664 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1665 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1666 return TypeFunc::make(domain, range);
1667 }
1668
1669 // Dilithium decompose polynomial
1670 static const TypeFunc* make_dilithiumDecomposePoly_Type() {
1671 int argcnt = 5;
1672
1673 const Type** fields = TypeTuple::fields(argcnt);
1674 int argp = TypeFunc::Parms;
1675 fields[argp++] = TypePtr::NOTNULL; // input
1676 fields[argp++] = TypePtr::NOTNULL; // lowPart
1677 fields[argp++] = TypePtr::NOTNULL; // highPart
1678 fields[argp++] = TypeInt::INT; // 2 * gamma2
1679 fields[argp++] = TypeInt::INT; // multiplier
1680
1681 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1682 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1683
1684 // result type needed
1685 fields = TypeTuple::fields(1);
1686 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1687 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1688 return TypeFunc::make(domain, range);
1689 }
1690
1691 static const TypeFunc* make_base64_encodeBlock_Type() {
1692 int argcnt = 6;
1693
1694 const Type** fields = TypeTuple::fields(argcnt);
1695 int argp = TypeFunc::Parms;
1696 fields[argp++] = TypePtr::NOTNULL; // src array
1697 fields[argp++] = TypeInt::INT; // offset
1698 fields[argp++] = TypeInt::INT; // length
1699 fields[argp++] = TypePtr::NOTNULL; // dest array
1700 fields[argp++] = TypeInt::INT; // dp
1701 fields[argp++] = TypeInt::BOOL; // isURL
1702 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1703 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1704
1705   // no result type needed
1706 fields = TypeTuple::fields(1);
1707 fields[TypeFunc::Parms + 0] = nullptr; // void
1708 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1709 return TypeFunc::make(domain, range);
1710 }
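// For reference, this is the shape of the java.util.Base64.Encoder.encodeBlock
// intrinsic (src, sp, sl, dst, dp, isURL), which has no return value.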
1711
1712 static const TypeFunc* make_string_IndexOf_Type() {
1713 int argcnt = 4;
1714
1715 const Type** fields = TypeTuple::fields(argcnt);
1716 int argp = TypeFunc::Parms;
1717 fields[argp++] = TypePtr::NOTNULL; // haystack array
1718 fields[argp++] = TypeInt::INT; // haystack length
1719 fields[argp++] = TypePtr::NOTNULL; // needle array
1720 fields[argp++] = TypeInt::INT; // needle length
1721 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1722 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1723
1724 // result type needed
1725 fields = TypeTuple::fields(1);
1726 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
1727 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1728 return TypeFunc::make(domain, range);
1729 }
1730
1731 static const TypeFunc* make_base64_decodeBlock_Type() {
1732 int argcnt = 7;
1733
1734 const Type** fields = TypeTuple::fields(argcnt);
1735 int argp = TypeFunc::Parms;
1736 fields[argp++] = TypePtr::NOTNULL; // src array
1737 fields[argp++] = TypeInt::INT; // src offset
1738 fields[argp++] = TypeInt::INT; // src length
1739 fields[argp++] = TypePtr::NOTNULL; // dest array
1740 fields[argp++] = TypeInt::INT; // dest offset
1741 fields[argp++] = TypeInt::BOOL; // isURL
1742 fields[argp++] = TypeInt::BOOL; // isMIME
1743 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1744 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1745
1746 // result type needed
1747 fields = TypeTuple::fields(1);
1748 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
1749 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1750 return TypeFunc::make(domain, range);
1751 }
1752
1753 static const TypeFunc* make_poly1305_processBlocks_Type() {
1754 int argcnt = 4;
1755
1756 const Type** fields = TypeTuple::fields(argcnt);
1757 int argp = TypeFunc::Parms;
1758 fields[argp++] = TypePtr::NOTNULL; // input array
1759 fields[argp++] = TypeInt::INT; // input length
1760 fields[argp++] = TypePtr::NOTNULL; // accumulator array
1761 fields[argp++] = TypePtr::NOTNULL; // r array
1762 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1763 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1764
1765   // no result type needed
1766 fields = TypeTuple::fields(1);
1767 fields[TypeFunc::Parms + 0] = nullptr; // void
1768 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1769 return TypeFunc::make(domain, range);
1770 }
1771
1772 static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
1773 int argcnt = 3;
1774
1775 const Type** fields = TypeTuple::fields(argcnt);
1776 int argp = TypeFunc::Parms;
1777 fields[argp++] = TypePtr::NOTNULL; // a array
1778 fields[argp++] = TypePtr::NOTNULL; // b array
1779 fields[argp++] = TypePtr::NOTNULL; // r(esult) array
1780 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1781 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1782
1783   // no result type needed
1784 fields = TypeTuple::fields(1);
1785 fields[TypeFunc::Parms + 0] = nullptr; // void
1786 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1787 return TypeFunc::make(domain, range);
1788 }
1789
1790 static const TypeFunc* make_intpoly_assign_Type() {
1791 int argcnt = 4;
1792
1793 const Type** fields = TypeTuple::fields(argcnt);
1794 int argp = TypeFunc::Parms;
1795 fields[argp++] = TypeInt::INT; // set flag
1796 fields[argp++] = TypePtr::NOTNULL; // a array (result)
1797   fields[argp++] = TypePtr::NOTNULL;    // b array (used when the set flag is set)
1798 fields[argp++] = TypeInt::INT; // array length
1799 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1800 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1801
1802   // no result type needed
1803 fields = TypeTuple::fields(1);
1804 fields[TypeFunc::Parms + 0] = nullptr; // void
1805 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1806 return TypeFunc::make(domain, range);
1807 }
1808
1809 //------------- Interpreter state for on stack replacement
1810 static const TypeFunc* make_osr_end_Type() {
1811 // create input type (domain)
1812 const Type **fields = TypeTuple::fields(1);
1813 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1814 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1815
1816 // create result type
1817 fields = TypeTuple::fields(1);
1818 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1819 fields[TypeFunc::Parms+0] = nullptr; // void
1820 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1821 return TypeFunc::make(domain, range);
1822 }
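// The single raw-pointer argument is the OSR migration buffer into which the
// interpreter frame's state was packed; the call described by this TypeFunc hands
// that buffer to SharedRuntime::OSR_migration_end so it can be released.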
1823
1824 #ifndef PRODUCT
1825 static void debug_print_convert_type(const Type** fields, int* argp, Node *parm) {
1826 const BasicType bt = parm->bottom_type()->basic_type();
1827 fields[(*argp)++] = Type::get_const_basic_type(bt);
1828 if (bt == T_LONG || bt == T_DOUBLE) {
1829 fields[(*argp)++] = Type::HALF;
1830 }
1831 }
1832
1833 static void update_arg_cnt(const Node* parm, int* arg_cnt) {
1834 (*arg_cnt)++;
1835 const BasicType bt = parm->bottom_type()->basic_type();
1836 if (bt == T_LONG || bt == T_DOUBLE) {
1837 (*arg_cnt)++;
1838 }
1839 }
1840
1841 const TypeFunc* OptoRuntime::debug_print_Type(Node* parm0, Node* parm1,
1842 Node* parm2, Node* parm3,
1843 Node* parm4, Node* parm5,
1844 Node* parm6) {
1845 int argcnt = 1;
1846 if (parm0 != nullptr) { update_arg_cnt(parm0, &argcnt);
1847 if (parm1 != nullptr) { update_arg_cnt(parm1, &argcnt);
1848 if (parm2 != nullptr) { update_arg_cnt(parm2, &argcnt);
1849 if (parm3 != nullptr) { update_arg_cnt(parm3, &argcnt);
1850 if (parm4 != nullptr) { update_arg_cnt(parm4, &argcnt);
1851 if (parm5 != nullptr) { update_arg_cnt(parm5, &argcnt);
1852 if (parm6 != nullptr) { update_arg_cnt(parm6, &argcnt);
1853 /* close each nested if ===> */ } } } } } } }
1854
1855 // create input type (domain)
1856 const Type** fields = TypeTuple::fields(argcnt);
1857 int argp = TypeFunc::Parms;
1858 fields[argp++] = TypePtr::NOTNULL; // static string pointer
1859
1860 if (parm0 != nullptr) { debug_print_convert_type(fields, &argp, parm0);
1861 if (parm1 != nullptr) { debug_print_convert_type(fields, &argp, parm1);
1862 if (parm2 != nullptr) { debug_print_convert_type(fields, &argp, parm2);
1863 if (parm3 != nullptr) { debug_print_convert_type(fields, &argp, parm3);
1864 if (parm4 != nullptr) { debug_print_convert_type(fields, &argp, parm4);
1865 if (parm5 != nullptr) { debug_print_convert_type(fields, &argp, parm5);
1866 if (parm6 != nullptr) { debug_print_convert_type(fields, &argp, parm6);
1867 /* close each nested if ===> */ } } } } } } }
1868
1869 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1870 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1871
1872 // no result type needed
1873 fields = TypeTuple::fields(1);
1874 fields[TypeFunc::Parms+0] = nullptr; // void
1875 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1876 return TypeFunc::make(domain, range);
1877 }
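// Note that both cascades above are deliberately nested: a null parm hides all later
// parameters, so callers are expected to pass their arguments left-packed (parm1 is
// only examined when parm0 is non-null, and so on).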
1878 #endif // PRODUCT
1879
1880 //-------------------------------------------------------------------------------------
1881 // register policy
1882
1883 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1884 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1885 switch (register_save_policy[reg]) {
1886     case 'C': return false; // SOC: save-on-call (caller-saved)
1887     case 'E': return true ; // SOE: save-on-entry (callee-saved)
1888     case 'N': return false; // NS:  not saved
1889     case 'A': return false; // AS:  always saved
1890 }
1891 ShouldNotReachHere();
1892 return false;
1893 }
1894
1895 //-----------------------------------------------------------------------
1896 // Exceptions
1897 //
1898
1899 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1900
1901 // This is an entry that is only ever called from a C++ method (handle_exception_C,
1902 // below), never directly from compiled code; compiled code reaches it through that
1903 // C++ method. We can't allow an async exception to be installed during exception processing.
1904 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
1905 // The frame we rethrow the exception to might not have been processed by the GC yet.
1906 // The stack watermark barrier takes care of detecting that and ensuring the frame
1907 // has updated oops.
1908 StackWatermarkSet::after_unwind(current);
1909
1910   // Do not confuse exception_oop with pending_exception. The exception_oop
1911   // is only used to pass arguments into this method, not for general
1912   // exception handling. DO NOT CHANGE IT to use pending_exception, since
1913   // the runtime stubs check this on exit.
1914 assert(current->exception_oop() != nullptr, "exception oop is found");
1915 address handler_address = nullptr;
1916
1917 Handle exception(current, current->exception_oop());
1918 address pc = current->exception_pc();
1919
1920 // Clear out the exception oop and pc since looking up an
1921 // exception handler can cause class loading, which might throw an
1922 // exception and those fields are expected to be clear during
1923 // normal bytecode execution.
1924 current->clear_exception_oop_and_pc();
1925
1926 LogTarget(Info, exceptions) lt;
1927 if (lt.is_enabled()) {
1928 LogStream ls(lt);
1929 trace_exception(&ls, exception(), pc, "");
1930 }
1931
1932 // for AbortVMOnException flag
1933 Exceptions::debug_check_abort(exception);
1934
1935 #ifdef ASSERT
1936 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
1937 // should throw an exception here
1938 ShouldNotReachHere();
1939 }
1940 #endif
1941
1942   // New exception handling: this method is entered only from adapters;
1943   // exceptions from compiled Java methods are handled in compiled code
1944   // using the rethrow node.
1945
1946 nm = CodeCache::find_nmethod(pc);
1947 assert(nm != nullptr, "No NMethod found");
1948 if (nm->is_native_method()) {
1949 fatal("Native method should not have path to exception handling");
1950 } else {
1951     // We are switching to the old paradigm: search for the exception handler in caller_frame
1952     // instead of in the exception handler of caller_frame.sender().
1953
1954 if (JvmtiExport::can_post_on_exceptions()) {
1955 // "Full-speed catching" is not necessary here,
1956 // since we're notifying the VM on every catch.
1957 // Force deoptimization and the rest of the lookup
1958 // will be fine.
1959 deoptimize_caller_frame(current);
1960 }
1961
1962 // Check the stack guard pages. If enabled, look for handler in this frame;
1963 // otherwise, forcibly unwind the frame.
1964 //
1965 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
1966 bool force_unwind = !current->stack_overflow_state()->reguard_stack();
1967 bool deopting = false;
1968 if (nm->is_deopt_pc(pc)) {
1969 deopting = true;
1970 RegisterMap map(current,
1971 RegisterMap::UpdateMap::skip,
1972 RegisterMap::ProcessFrames::include,
1973 RegisterMap::WalkContinuation::skip);
1974 frame deoptee = current->last_frame().sender(&map);
1975 assert(deoptee.is_deoptimized_frame(), "must be deopted");
1976 // Adjust the pc back to the original throwing pc
1977 pc = deoptee.pc();
1978 }
1979
1980 // If we are forcing an unwind because of stack overflow then deopt is
1981 // irrelevant since we are throwing the frame away anyway.
1982
1983 if (deopting && !force_unwind) {
1984 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1985 } else {
1986
1987 handler_address =
1988 force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);
1989
1990 if (handler_address == nullptr) {
1991 bool recursive_exception = false;
1992 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1993 assert (handler_address != nullptr, "must have compiled handler");
1994 // Update the exception cache only when the unwind was not forced
1995 // and there didn't happen another exception during the computation of the
1996 // compiled exception handler. Checking for exception oop equality is not
1997 // sufficient because some exceptions are pre-allocated and reused.
1998 if (!force_unwind && !recursive_exception) {
1999 nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
2000 }
2001 } else {
2002 #ifdef ASSERT
2003 bool recursive_exception = false;
2004 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
2005 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
2006 p2i(handler_address), p2i(computed_address));
2007 #endif
2008 }
2009 }
2010
2011 current->set_exception_pc(pc);
2012 current->set_exception_handler_pc(handler_address);
2013 }
2014
2015   // Restore the exception oop that was saved (as a Handle) above.
2016 current->set_exception_oop(exception());
2017 return handler_address;
2018
2019 JRT_END
2020
2021 // We are entering here from exception_blob.
2022 // If there is a compiled exception handler in this method, we will continue there;
2023 // otherwise we will unwind the stack and continue at the caller of the top-frame method.
2024 // Note that we enter without the usual JRT wrapper. We call a helper routine that
2025 // does the normal VM entry. We do it this way so that we can see whether the nmethod
2026 // we looked up the handler for has been deoptimized in the meantime. If it has been,
2027 // we must not use the handler and must instead return the deopt blob.
2028 address OptoRuntime::handle_exception_C(JavaThread* current) {
2029 //
2030 // We are in Java not VM and in debug mode we have a NoHandleMark
2031 //
2032 #ifndef PRODUCT
2033 SharedRuntime::_find_handler_ctr++; // find exception handler
2034 #endif
2035 DEBUG_ONLY(NoHandleMark __hm;)
2036 nmethod* nm = nullptr;
2037 address handler_address = nullptr;
2038 {
2039 // Enter the VM
2040
2041 ResetNoHandleMark rnhm;
2042 handler_address = handle_exception_C_helper(current, nm);
2043 }
2044
2045 // Back in java: Use no oops, DON'T safepoint
2046
2047 // Now check to see if the handler we are returning is in a now
2048 // deoptimized frame
2049
2050 if (nm != nullptr) {
2051 RegisterMap map(current,
2052 RegisterMap::UpdateMap::skip,
2053 RegisterMap::ProcessFrames::skip,
2054 RegisterMap::WalkContinuation::skip);
2055 frame caller = current->last_frame().sender(&map);
2056 #ifdef ASSERT
2057 assert(caller.is_compiled_frame(), "must be");
2058 #endif // ASSERT
2059 if (caller.is_deoptimized_frame()) {
2060 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
2061 }
2062 }
2063 return handler_address;
2064 }
2065
2066 //------------------------------rethrow----------------------------------------
2067 // We get here after compiled code has executed a 'RethrowNode'. The callee
2068 // is either throwing or rethrowing an exception. The callee-save registers
2069 // have been restored, synchronized objects have been unlocked and the callee
2070 // stack frame has been removed. The return address was passed in.
2071 // Exception oop is passed as the 1st argument. This routine is then called
2072 // from the stub. On exit, we know where to jump in the caller's code.
2073 // After this C code exits, the stub will pop its frame and end in a jump
2074 // (instead of a return). We enter the caller's default handler.
2075 //
2076 // This must be JRT_LEAF:
2077 // - caller will not change its state as we cannot block on exit,
2078 // therefore raw_exception_handler_for_return_address is all it takes
2079 // to handle deoptimized blobs
2080 //
2081 // However, there needs to be a safepoint check in the middle! So compiled
2082 // safepoints are completely watertight.
2083 //
2084 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
2085 //
2086 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
2087 //
2088 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
2089 // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
2090 AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));
2091
2092 #ifndef PRODUCT
2093 SharedRuntime::_rethrow_ctr++; // count rethrows
2094 #endif
2095 assert (exception != nullptr, "should have thrown a NullPointerException");
2096 #ifdef ASSERT
2097 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
2098 // should throw an exception here
2099 ShouldNotReachHere();
2100 }
2101 #endif
2102
2103 thread->set_vm_result_oop(exception);
2104 // Frame not compiled (handles deoptimization blob)
2105 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
2106 }
2107
2108 static const TypeFunc* make_rethrow_Type() {
2109 // create input type (domain)
2110 const Type **fields = TypeTuple::fields(1);
2111 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2112 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2113
2114 // create result type (range)
2115 fields = TypeTuple::fields(1);
2116 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2117 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
2118
2119 return TypeFunc::make(domain, range);
2120 }
2121
2122
2123 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
2124 // Deoptimize the caller before continuing, as the compiled
2125 // exception handler table may not be valid.
2126 if (DeoptimizeOnAllocationException && doit) {
2127 deoptimize_caller_frame(thread);
2128 }
2129 }
2130
2131 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
2132 // Called from within the owner thread, so no need for safepoint
2133 RegisterMap reg_map(thread,
2134 RegisterMap::UpdateMap::include,
2135 RegisterMap::ProcessFrames::include,
2136 RegisterMap::WalkContinuation::skip);
2137 frame stub_frame = thread->last_frame();
2138 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2139   frame caller_frame = stub_frame.sender(&reg_map);
2140
2141 // Deoptimize the caller frame.
2142 Deoptimization::deoptimize_frame(thread, caller_frame.id());
2143 }
2144
2145
2146 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
2147 // Called from within the owner thread, so no need for safepoint
2148 RegisterMap reg_map(thread,
2149 RegisterMap::UpdateMap::include,
2150 RegisterMap::ProcessFrames::include,
2151 RegisterMap::WalkContinuation::skip);
2152 frame stub_frame = thread->last_frame();
2153 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2154   frame caller_frame = stub_frame.sender(&reg_map);
2155 return caller_frame.is_deoptimized_frame();
2156 }
2157
2158 static const TypeFunc* make_register_finalizer_Type() {
2159 // create input type (domain)
2160 const Type **fields = TypeTuple::fields(1);
2161 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
2162 // // The JavaThread* is passed to each routine as the last argument
2163 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
2164 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2165
2166 // create result type (range)
2167 fields = TypeTuple::fields(0);
2168
2169 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2170
2171 return TypeFunc::make(domain, range);
2172 }
2173
2174 #if INCLUDE_JFR
2175 static const TypeFunc* make_class_id_load_barrier_Type() {
2176 // create input type (domain)
2177 const Type **fields = TypeTuple::fields(1);
2178 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
2179 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
2180
2181 // create result type (range)
2182 fields = TypeTuple::fields(0);
2183
2184 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
2185
2186 return TypeFunc::make(domain,range);
2187 }
2188 #endif // INCLUDE_JFR
2189
2190 //-----------------------------------------------------------------------------
2191 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
2192 // create input type (domain)
2193 const Type **fields = TypeTuple::fields(2);
2194 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2195 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
2196 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2197
2198 // create result type (range)
2199 fields = TypeTuple::fields(0);
2200
2201 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2202
2203 return TypeFunc::make(domain, range);
2204 }
2205
2206 static const TypeFunc* make_dtrace_object_alloc_Type() {
2207 // create input type (domain)
2208 const Type **fields = TypeTuple::fields(2);
2209 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2210 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
2211
2212 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2213
2214 // create result type (range)
2215 fields = TypeTuple::fields(0);
2216
2217 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2218
2219 return TypeFunc::make(domain, range);
2220 }
2221
2222 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
2223 assert(oopDesc::is_oop(obj), "must be a valid oop");
2224 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
2225 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
2226 JRT_END
2227
2228 //-----------------------------------------------------------------------------
2229
2230 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
2231
2232 //
2233 // dump the collected NamedCounters.
2234 //
2235 void OptoRuntime::print_named_counters() {
2236 int total_lock_count = 0;
2237 int eliminated_lock_count = 0;
2238
2239 NamedCounter* c = _named_counters;
2240 while (c) {
2241 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2242 int count = c->count();
2243 if (count > 0) {
2244 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2245 if (Verbose) {
2246 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2247 }
2248 total_lock_count += count;
2249 if (eliminated) {
2250 eliminated_lock_count += count;
2251 }
2252 }
2253 }
2254 c = c->next();
2255 }
2256 if (total_lock_count > 0) {
2257 tty->print_cr("dynamic locks: %d", total_lock_count);
2258 if (eliminated_lock_count) {
2259 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
2260 (int)(eliminated_lock_count * 100.0 / total_lock_count));
2261 }
2262 }
2263 }
2264
2265 //
2266 // Allocate a new NamedCounter. The JVMState is used to generate the
2267 // Allocate a new NamedCounter. The JVMState is used to generate the
2268 // name, which consists of method@bci entries for the inlining tree.
2269
2270 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
2271 int max_depth = youngest_jvms->depth();
2272
2273 // Visit scopes from youngest to oldest.
2274 bool first = true;
2275 stringStream st;
2276 for (int depth = max_depth; depth >= 1; depth--) {
2277 JVMState* jvms = youngest_jvms->of_depth(depth);
2278 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
2279 if (!first) {
2280 st.print(" ");
2281 } else {
2282 first = false;
2283 }
2284 int bci = jvms->bci();
2285 if (bci < 0) bci = 0;
2286 if (m != nullptr) {
2287 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
2288 } else {
2289 st.print("no method");
2290 }
2291 st.print("@%d", bci);
2292 // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
2293 }
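  // The resulting name lists scopes youngest-first, e.g. something like
  // "Leaf.callee@7 Caller.run@42" for one level of inlining (the class and
  // method names here are purely illustrative).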
2294 NamedCounter* c = new NamedCounter(st.freeze(), tag);
2295
2296 // atomically add the new counter to the head of the list. We only
2297 // add counters so this is safe.
2298 NamedCounter* head;
2299 do {
2300 c->set_next(nullptr);
2301 head = _named_counters;
2302 c->set_next(head);
2303 } while (AtomicAccess::cmpxchg(&_named_counters, head, c) != head);
2304 return c;
2305 }
2306
2307 void OptoRuntime::initialize_types() {
2308 _new_instance_Type = make_new_instance_Type();
2309 _new_array_Type = make_new_array_Type();
2310 _new_array_nozero_Type = make_new_array_nozero_Type();
2311 _multianewarray2_Type = multianewarray_Type(2);
2312 _multianewarray3_Type = multianewarray_Type(3);
2313 _multianewarray4_Type = multianewarray_Type(4);
2314 _multianewarray5_Type = multianewarray_Type(5);
2315 _multianewarrayN_Type = make_multianewarrayN_Type();
2316 _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
2317 _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
2318 _monitor_notify_Type = make_monitor_notify_Type();
2319 _uncommon_trap_Type = make_uncommon_trap_Type();
2320 _athrow_Type = make_athrow_Type();
2321 _rethrow_Type = make_rethrow_Type();
2322 _Math_D_D_Type = make_Math_D_D_Type();
2323 _Math_DD_D_Type = make_Math_DD_D_Type();
2324 _modf_Type = make_modf_Type();
2325 _l2f_Type = make_l2f_Type();
2326 _void_long_Type = make_void_long_Type();
2327 _void_void_Type = make_void_void_Type();
2328 _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
2329 _flush_windows_Type = make_flush_windows_Type();
2330 _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
2331 _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
2332 _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
2333 _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
2334 _unsafe_setmemory_Type = make_setmemory_Type();
2335 _array_fill_Type = make_array_fill_Type();
2336 _array_sort_Type = make_array_sort_Type();
2337 _array_partition_Type = make_array_partition_Type();
2338 _aescrypt_block_Type = make_aescrypt_block_Type();
2339 _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
2340 _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
2341 _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
2342 _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
2343 _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true);
2344   _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);
2345 _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
2346 _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
2347 _double_keccak_Type = make_double_keccak_Type();
2348 _multiplyToLen_Type = make_multiplyToLen_Type();
2349 _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
2350 _montgomerySquare_Type = make_montgomerySquare_Type();
2351 _squareToLen_Type = make_squareToLen_Type();
2352 _mulAdd_Type = make_mulAdd_Type();
2353 _bigIntegerShift_Type = make_bigIntegerShift_Type();
2354 _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
2355 _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
2356 _chacha20Block_Type = make_chacha20Block_Type();
2357 _kyberNtt_Type = make_kyberNtt_Type();
2358 _kyberInverseNtt_Type = make_kyberInverseNtt_Type();
2359 _kyberNttMult_Type = make_kyberNttMult_Type();
2360 _kyberAddPoly_2_Type = make_kyberAddPoly_2_Type();
2361 _kyberAddPoly_3_Type = make_kyberAddPoly_3_Type();
2362 _kyber12To16_Type = make_kyber12To16_Type();
2363 _kyberBarrettReduce_Type = make_kyberBarrettReduce_Type();
2364 _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type();
2365 _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type();
2366 _dilithiumNttMult_Type = make_dilithiumNttMult_Type();
2367 _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type();
2368 _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type();
2369 _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
2370 _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
2371 _string_IndexOf_Type = make_string_IndexOf_Type();
2372 _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
2373 _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
2374 _intpoly_assign_Type = make_intpoly_assign_Type();
2375 _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
2376 _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
2377 _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
2378 _osr_end_Type = make_osr_end_Type();
2379 _register_finalizer_Type = make_register_finalizer_Type();
2380 JFR_ONLY(
2381 _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
2382 )
2383 #if INCLUDE_JVMTI
2384 _notify_jvmti_vthread_Type = make_notify_jvmti_vthread_Type();
2385 #endif // INCLUDE_JVMTI
2386 _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
2387 _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
2388 }
2389
2390 int trace_exception_counter = 0;
2391 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2392 trace_exception_counter++;
2393 stringStream tempst;
2394
2395 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2396 exception_oop->print_value_on(&tempst);
2397 tempst.print(" in ");
2398 CodeBlob* blob = CodeCache::find_blob(exception_pc);
2399 if (blob->is_nmethod()) {
2400 blob->as_nmethod()->method()->print_value_on(&tempst);
2401 } else if (blob->is_runtime_stub()) {
2402 tempst.print("<runtime-stub>");
2403 } else {
2404 tempst.print("<unknown>");
2405 }
2406 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
2407 tempst.print("]");
2408
2409 st->print_raw_cr(tempst.freeze());
2410 }
2411
2412 const TypeFunc *OptoRuntime::store_inline_type_fields_Type() {
2413 // create input type (domain)
2414 uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2415 const Type **fields = TypeTuple::fields(total);
2416 // We don't know the number of returned values and their
2417 // types. Assume all registers available to the return convention
2418 // are used.
2419 fields[TypeFunc::Parms] = TypePtr::BOTTOM;
2420 uint i = 1;
2421 for (; i < SharedRuntime::java_return_convention_max_int; i++) {
2422 fields[TypeFunc::Parms+i] = TypeInt::INT;
2423 }
2424 for (; i < total; i+=2) {
2425 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2426 fields[TypeFunc::Parms+i+1] = Type::HALF;
2427 }
2428 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2429
2430 // create result type (range)
2431 fields = TypeTuple::fields(1);
2432 fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
2433
2434 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2435
2436 return TypeFunc::make(domain, range);
2437 }
2438
2439 const TypeFunc *OptoRuntime::pack_inline_type_Type() {
2440 // create input type (domain)
2441 uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2;
2442 const Type **fields = TypeTuple::fields(total);
2443 // We don't know the number of returned values and their
2444 // types. Assume all registers available to the return convention
2445 // are used.
2446 fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM;
2447 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;
2448 uint i = 2;
2449 for (; i < SharedRuntime::java_return_convention_max_int+1; i++) {
2450 fields[TypeFunc::Parms+i] = TypeInt::INT;
2451 }
2452 for (; i < total; i+=2) {
2453 fields[TypeFunc::Parms+i] = Type::DOUBLE;
2454 fields[TypeFunc::Parms+i+1] = Type::HALF;
2455 }
2456 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields);
2457
2458 // create result type (range)
2459 fields = TypeTuple::fields(1);
2460 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
2461
2462 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields);
2463
2464 return TypeFunc::make(domain, range);
2465 }
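// As with store_inline_type_fields_Type above, the domain is shaped after the full Java
// return convention (every integer return register as an INT, every float return register
// as a DOUBLE/HALF pair) because the mix of fields actually packed is not known when this
// TypeFunc is built; pack_inline_type additionally reserves two leading raw-pointer slots.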
2466
2467 JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current))
2468 JRT_BLOCK;
2469 oop buffer = array->obj_at(index, THREAD);
2470 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
2471 current->set_vm_result_oop(buffer);
2472 JRT_BLOCK_END;
2473 JRT_END
2474
2475 const TypeFunc* OptoRuntime::load_unknown_inline_Type() {
2476 // create input type (domain)
2477 const Type** fields = TypeTuple::fields(2);
2478 fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL;
2479 fields[TypeFunc::Parms+1] = TypeInt::POS;
2480
2481 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);
2482
2483 // create result type (range)
2484 fields = TypeTuple::fields(1);
2485 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
2486
2487 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
2488
2489 return TypeFunc::make(domain, range);
2490 }
2491
2492 JRT_BLOCK_ENTRY(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current))
2493 JRT_BLOCK;
2494 array->obj_at_put(index, buffer, THREAD);
2495 if (HAS_PENDING_EXCEPTION) {
2496 fatal("This entry must be changed to be a non-leaf entry because writing to a flat array can now throw an exception");
2497 }
2498 JRT_BLOCK_END;
2499 JRT_END
2500
2501 const TypeFunc* OptoRuntime::store_unknown_inline_Type() {
2502 // create input type (domain)
2503 const Type** fields = TypeTuple::fields(3);
2504 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL;
2505 fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL;
2506 fields[TypeFunc::Parms+2] = TypeInt::POS;
2507
2508 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);
2509
2510 // create result type (range)
2511 fields = TypeTuple::fields(0);
2512 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
2513
2514 return TypeFunc::make(domain, range);
2515 }