1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmClasses.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/nmethod.hpp"
30 #include "code/pcDesc.hpp"
31 #include "code/scopeDesc.hpp"
32 #include "code/vtableStubs.hpp"
33 #include "compiler/compilationMemoryStatistic.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/g1/g1HeapRegion.hpp"
38 #include "gc/shared/barrierSet.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gcLocker.hpp"
41 #include "interpreter/bytecode.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "interpreter/linkResolver.hpp"
44 #include "logging/log.hpp"
45 #include "logging/logStream.hpp"
46 #include "memory/oopFactory.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "oops/klass.inline.hpp"
49 #include "oops/objArrayKlass.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "oops/typeArrayOop.inline.hpp"
52 #include "opto/ad.hpp"
53 #include "opto/addnode.hpp"
54 #include "opto/callnode.hpp"
55 #include "opto/cfgnode.hpp"
56 #include "opto/graphKit.hpp"
57 #include "opto/machnode.hpp"
58 #include "opto/matcher.hpp"
59 #include "opto/memnode.hpp"
60 #include "opto/mulnode.hpp"
61 #include "opto/output.hpp"
62 #include "opto/runtime.hpp"
63 #include "opto/subnode.hpp"
64 #include "prims/jvmtiExport.hpp"
65 #include "runtime/atomicAccess.hpp"
66 #include "runtime/frame.inline.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/interfaceSupport.inline.hpp"
69 #include "runtime/java.hpp"
70 #include "runtime/javaCalls.hpp"
71 #include "runtime/mountUnmountDisabler.hpp"
72 #include "runtime/perfData.inline.hpp"
73 #include "runtime/sharedRuntime.hpp"
74 #include "runtime/signature.hpp"
75 #include "runtime/stackWatermarkSet.hpp"
76 #include "runtime/synchronizer.hpp"
77 #include "runtime/threadWXSetters.inline.hpp"
78 #include "runtime/vframe.hpp"
79 #include "runtime/vframe_hp.hpp"
80 #include "runtime/vframeArray.hpp"
81 #include "services/management.hpp"
82 #include "utilities/copy.hpp"
83 #include "utilities/preserveException.hpp"
84
85
86 // For debugging purposes:
87 // To force FullGCALot inside a runtime function, add the following two lines
88 //
89 // Universe::release_fullgc_alot_dummy();
90 // Universe::heap()->collect();
91 //
92 // On the command line, specify the flags: -XX:+FullGCALot -XX:FullGCALotStart=100000000
93
94
95 #define C2_BLOB_FIELD_DEFINE(name, type) \
96 type* OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
97 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
98 #define C2_STUB_FIELD_DEFINE(name, f, t, r) \
99 address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
100 C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE)
101 #undef C2_BLOB_FIELD_DEFINE
102 #undef C2_STUB_FIELD_DEFINE
103
104 address OptoRuntime::_vtable_must_compile_Java = nullptr;
105
106 PerfCounter* _perf_OptoRuntime_class_init_barrier_redundant_count = nullptr;
107
108 // This should be called in an assertion at the start of OptoRuntime routines
109 // which are entered from compiled code (all of them)
110 #ifdef ASSERT
111 static bool check_compiled_frame(JavaThread* thread) {
112 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
113 RegisterMap map(thread,
114 RegisterMap::UpdateMap::skip,
115 RegisterMap::ProcessFrames::include,
116 RegisterMap::WalkContinuation::skip);
117 frame caller = thread->last_frame().sender(&map);
118 assert(caller.is_compiled_frame(), "not being called from compiled-like code");
119 return true;
120 }
121 #endif // ASSERT
122
123 /*
124 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
125 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
126 if (var == nullptr) { return false; }
127 */
128
129 #define GEN_C2_BLOB(name, type) \
130 BLOB_FIELD_NAME(name) = \
131 generate_ ## name ## _blob(); \
132 if (BLOB_FIELD_NAME(name) == nullptr) { return false; }
133
134 // a few helper macros to conjure up generate_stub call arguments
135 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
136 #define C2_STUB_TYPEFUNC(name) name ## _Type
137 #define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
138 #define C2_STUB_ID(name) StubId:: JOIN3(c2, name, id)
139 #define C2_STUB_NAME(name) stub_name(C2_STUB_ID(name))
140
141 // Almost all the C functions targeted from the generated stubs are
142 // implemented locally to OptoRuntime with names that can be generated
143 // from the stub name by appending the suffix '_C'. However, in two cases
144 // a common target method also needs to be called from shared runtime
145 // stubs. In these two cases the opto stubs rely on method
146 // implementations defined in class SharedRuntime; local forwarding
147 // methods defined below (after the stub generators) pass control to
148 // those shared implementations.
149
150 #define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
151 C2_STUB_FIELD_NAME(name) = \
152 generate_stub(env, \
153 C2_STUB_TYPEFUNC(name), \
154 C2_STUB_C_FUNC(name), \
155 C2_STUB_NAME(name), \
156 C2_STUB_ID(name), \
157 fancy_jump, \
158 pass_tls, \
159 pass_retpc); \
160 if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \
161
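// For illustration only (a sketch, not generated code): assuming JOIN3 pastes
// its arguments with underscores and that 'new_instance' is among the stubs
// listed by C2_STUBS_DO, GEN_C2_STUB(new_instance, fancy_jump, pass_tls,
// pass_retpc) expands roughly to
//
//   _new_instance_Java =
//       generate_stub(env,
//                     new_instance_Type,
//                     CAST_FROM_FN_PTR(address, new_instance_C),
//                     stub_name(StubId::c2_new_instance_id),
//                     StubId::c2_new_instance_id,
//                     fancy_jump, pass_tls, pass_retpc);
//   if (_new_instance_Java == nullptr) { return false; }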
162 bool OptoRuntime::generate(ciEnv* env) {
163 init_counters();
164
165 C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB)
166
167 return true;
168 }
169
170 #undef GEN_C2_BLOB
171
172 #undef C2_STUB_FIELD_NAME
173 #undef C2_STUB_TYPEFUNC
174 #undef C2_STUB_C_FUNC
175 #undef C2_STUB_NAME
176 #undef GEN_C2_STUB
177
178 // #undef gen
179
180 const TypeFunc* OptoRuntime::_new_instance_Type = nullptr;
181 const TypeFunc* OptoRuntime::_new_array_Type = nullptr;
182 const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr;
183 const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr;
184 const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr;
185 const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr;
186 const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr;
187 const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr;
188 const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr;
189 const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr;
190 const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr;
191 const TypeFunc* OptoRuntime::_athrow_Type = nullptr;
192 const TypeFunc* OptoRuntime::_rethrow_Type = nullptr;
193 const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr;
194 const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr;
195 const TypeFunc* OptoRuntime::_modf_Type = nullptr;
196 const TypeFunc* OptoRuntime::_l2f_Type = nullptr;
197 const TypeFunc* OptoRuntime::_void_long_Type = nullptr;
198 const TypeFunc* OptoRuntime::_void_void_Type = nullptr;
199 const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr;
200 const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr;
201 const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr;
202 const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr;
203 const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr;
204 const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr;
205 const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr;
206 const TypeFunc* OptoRuntime::_array_fill_Type = nullptr;
207 const TypeFunc* OptoRuntime::_array_sort_Type = nullptr;
208 const TypeFunc* OptoRuntime::_array_partition_Type = nullptr;
209 const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr;
210 const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr;
211 const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr;
212 const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr;
213 const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr;
214 const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr;
215 const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr;
216 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr;
217 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
218 const TypeFunc* OptoRuntime::_double_keccak_Type = nullptr;
219 const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr;
220 const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr;
221 const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr;
222 const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr;
223 const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr;
224 const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr;
225 const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr;
226 const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr;
227 const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr;
228 const TypeFunc* OptoRuntime::_kyberNtt_Type = nullptr;
229 const TypeFunc* OptoRuntime::_kyberInverseNtt_Type = nullptr;
230 const TypeFunc* OptoRuntime::_kyberNttMult_Type = nullptr;
231 const TypeFunc* OptoRuntime::_kyberAddPoly_2_Type = nullptr;
232 const TypeFunc* OptoRuntime::_kyberAddPoly_3_Type = nullptr;
233 const TypeFunc* OptoRuntime::_kyber12To16_Type = nullptr;
234 const TypeFunc* OptoRuntime::_kyberBarrettReduce_Type = nullptr;
235 const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type = nullptr;
236 const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type = nullptr;
237 const TypeFunc* OptoRuntime::_dilithiumNttMult_Type = nullptr;
238 const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type = nullptr;
239 const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type = nullptr;
240 const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr;
241 const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr;
242 const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr;
243 const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr;
244 const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr;
245 const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr;
246 const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr;
247 const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr;
248 const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr;
249 const TypeFunc* OptoRuntime::_osr_end_Type = nullptr;
250 const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr;
251 const TypeFunc* OptoRuntime::_vthread_transition_Type = nullptr;
252 #if INCLUDE_JFR
253 const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr;
254 #endif // INCLUDE_JFR
255 const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr;
256 const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr;
257
258 // Helper method to generate RuntimeStubs
259 address OptoRuntime::generate_stub(ciEnv* env,
260 TypeFunc_generator gen, address C_function,
261 const char *name, StubId stub_id,
262 int is_fancy_jump, bool pass_tls,
263 bool return_pc) {
264
265 // Use the default directive; there is currently no method to match against.
266 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompilerThread::current()->compiler());
267 CompilationMemoryStatisticMark cmsm(directive);
268 ResourceMark rm;
269 Compile C(env, gen, C_function, name, stub_id, is_fancy_jump, pass_tls, return_pc, directive);
270 DirectivesStack::release(directive);
271 return C.stub_entry_point();
272 }
273
274 const char* OptoRuntime::stub_name(address entry) {
275 #ifndef PRODUCT
276 CodeBlob* cb = CodeCache::find_blob(entry);
277 RuntimeStub* rs = (RuntimeStub*)cb;
278 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
279 return rs->name();
280 #else
281 // Fast implementation for product mode (maybe it should be inlined too)
282 return "runtime stub";
283 #endif
284 }
285
286 // local methods passed as arguments to stub generator that forward
287 // control to corresponding JRT methods of SharedRuntime
288
289 void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
290 oopDesc* dest, jint dest_pos,
291 jint length, JavaThread* thread) {
292 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
293 }
294
295 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
296 SharedRuntime::complete_monitor_locking_C(obj, lock, current);
297 }
298
299
300 //=============================================================================
301 // Opto compiler runtime routines
302 //=============================================================================
303
304
305 //=============================allocation======================================
306 // We failed the fast-path allocation. Now we need to do a scavenge or GC
307 // and try allocation again.
308
309 // object allocation
310 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_instance_C, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
311 JRT_BLOCK;
312 #ifndef PRODUCT
313 SharedRuntime::_new_instance_ctr++; // new instance requires GC
314 #endif
315 assert(check_compiled_frame(current), "incorrect caller");
316
317 // These checks are cheap to make and support reflective allocation.
318 int lh = klass->layout_helper();
319 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
320 Handle holder(current, klass->klass_holder()); // keep the klass alive
321 klass->check_valid_for_instantiation(false, THREAD);
322 if (!HAS_PENDING_EXCEPTION) {
323 InstanceKlass::cast(klass)->initialize(THREAD);
324 }
325 }
326
327 if (!HAS_PENDING_EXCEPTION) {
328 // Scavenge and allocate an instance.
329 Handle holder(current, klass->klass_holder()); // keep the klass alive
330 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
331 current->set_vm_result_oop(result);
332
333 // Pass oops back through thread local storage. Our apparent type to Java
334 // is that we return an oop, but we can block on exit from this routine and
335 // a GC can trash the oop in C's return register. The generated stub will
336 // fetch the oop from TLS after any possible GC.
337 }
338
339 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
340 JRT_BLOCK_END;
341
342 // inform GC that we won't do card marks for initializing writes.
343 SharedRuntime::on_slowpath_allocation_exit(current);
344 JRT_END
345
346
347 // array allocation
348 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_C, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
349 JRT_BLOCK;
350 #ifndef PRODUCT
351 SharedRuntime::_new_array_ctr++; // new array requires GC
352 #endif
353 assert(check_compiled_frame(current), "incorrect caller");
354
355 // Scavenge and allocate an instance.
356 oop result;
357
358 if (array_type->is_typeArray_klass()) {
359 // The oopFactory likes to work with the element type.
360 // (We could bypass the oopFactory, since it doesn't add much value.)
361 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
362 result = oopFactory::new_typeArray(elem_type, len, THREAD);
363 } else {
364 // Although the oopFactory likes to work with the elem_type,
365 // the compiler prefers the array_type, since it must already have
366 // that latter value in hand for the fast path.
367 Handle holder(current, array_type->klass_holder()); // keep the array klass alive
368 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
369 result = oopFactory::new_objArray(elem_type, len, THREAD);
370 }
371
372 // Pass oops back through thread local storage. Our apparent type to Java
373 // is that we return an oop, but we can block on exit from this routine and
374 // a GC can trash the oop in C's return register. The generated stub will
375 // fetch the oop from TLS after any possible GC.
376 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
377 current->set_vm_result_oop(result);
378 JRT_BLOCK_END;
379
380 // inform GC that we won't do card marks for initializing writes.
381 SharedRuntime::on_slowpath_allocation_exit(current);
382 JRT_END
383
384 // array allocation without zeroing
385 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_nozero_C, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
386 JRT_BLOCK;
387 #ifndef PRODUCT
388 SharedRuntime::_new_array_ctr++; // new array requires GC
389 #endif
390 assert(check_compiled_frame(current), "incorrect caller");
391
392 // Scavenge and allocate an instance.
393 oop result;
394
395 assert(array_type->is_typeArray_klass(), "should be called only for type array");
396 // The oopFactory likes to work with the element type.
397 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
398 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);
399
400 // Pass oops back through thread local storage. Our apparent type to Java
401 // is that we return an oop, but we can block on exit from this routine and
402 // a GC can trash the oop in C's return register. The generated stub will
403 // fetch the oop from TLS after any possible GC.
404 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
405 current->set_vm_result_oop(result);
406 JRT_BLOCK_END;
407
408
409 // inform GC that we won't do card marks for initializing writes.
410 SharedRuntime::on_slowpath_allocation_exit(current);
411
412 oop result = current->vm_result_oop();
413 if ((len > 0) && (result != nullptr) &&
414 is_deoptimized_caller_frame(current)) {
415 // Zero array here if the caller is deoptimized.
416 const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
417 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
418 size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
419 assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
420 HeapWord* obj = cast_from_oop<HeapWord*>(result);
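// If the array header ends on a 4-byte (but not 8-byte) boundary, zero the
// odd trailing jint first so the bulk fill below can work on 8-byte words.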
421 if (!is_aligned(hs_bytes, BytesPerLong)) {
422 *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
423 hs_bytes += BytesPerInt;
424 }
425
426 // Optimized zeroing.
427 assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
428 const size_t aligned_hs = hs_bytes / BytesPerLong;
429 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
430 }
431
432 JRT_END
433
434 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
435
436 // multianewarray for 2 dimensions
437 JRT_ENTRY_PROF(void, OptoRuntime, multianewarray2_C, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
438 #ifndef PRODUCT
439 SharedRuntime::_multi2_ctr++; // multianewarray for 2 dimensions
440 #endif
441 assert(check_compiled_frame(current), "incorrect caller");
442 assert(elem_type->is_klass(), "not a class");
443 jint dims[2];
444 dims[0] = len1;
445 dims[1] = len2;
446 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
447 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
448 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
449 current->set_vm_result_oop(obj);
450 JRT_END
451
452 // multianewarray for 3 dimensions
453 JRT_ENTRY_PROF(void, OptoRuntime, multianewarray3_C, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
454 #ifndef PRODUCT
455 SharedRuntime::_multi3_ctr++; // multianewarray for 3 dimensions
456 #endif
457 assert(check_compiled_frame(current), "incorrect caller");
458 assert(elem_type->is_klass(), "not a class");
459 jint dims[3];
460 dims[0] = len1;
461 dims[1] = len2;
462 dims[2] = len3;
463 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
464 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
465 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
466 current->set_vm_result_oop(obj);
467 JRT_END
468
469 // multianewarray for 4 dimensions
470 JRT_ENTRY_PROF(void, OptoRuntime, multianewarray4_C, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
471 #ifndef PRODUCT
472 SharedRuntime::_multi4_ctr++; // multianewarray for 4 dimensions
473 #endif
474 assert(check_compiled_frame(current), "incorrect caller");
475 assert(elem_type->is_klass(), "not a class");
476 jint dims[4];
477 dims[0] = len1;
478 dims[1] = len2;
479 dims[2] = len3;
480 dims[3] = len4;
481 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
482 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
483 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
484 current->set_vm_result_oop(obj);
485 JRT_END
486
487 // multianewarray for 5 dimensions
488 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
489 #ifndef PRODUCT
490 SharedRuntime::_multi5_ctr++; // multianewarray for 5 dimensions
491 #endif
492 assert(check_compiled_frame(current), "incorrect caller");
493 assert(elem_type->is_klass(), "not a class");
494 jint dims[5];
495 dims[0] = len1;
496 dims[1] = len2;
497 dims[2] = len3;
498 dims[3] = len4;
499 dims[4] = len5;
500 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
501 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
502 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
503 current->set_vm_result_oop(obj);
504 JRT_END
505
506 JRT_ENTRY_PROF(void, OptoRuntime, multianewarrayN_C, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
507 assert(check_compiled_frame(current), "incorrect caller");
508 assert(elem_type->is_klass(), "not a class");
509 assert(oop(dims)->is_typeArray(), "not an array");
510
511 ResourceMark rm;
512 jint len = dims->length();
513 assert(len > 0, "Dimensions array should contain data");
514 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
515 ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
516 c_dims, len);
517
518 Handle holder(current, elem_type->klass_holder()); // keep the klass alive
519 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
520 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
521 current->set_vm_result_oop(obj);
522 JRT_END
523
524 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notify_C, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))
525
526 // Very few notify/notifyAll operations find any threads on the waitset, so
527 // the dominant fast-path is to simply return.
528 // Relatedly, it's critical that notify/notifyAll be fast in order to
529 // reduce lock hold times.
530 if (!SafepointSynchronize::is_synchronizing()) {
531 if (ObjectSynchronizer::quick_notify(obj, current, false)) {
532 return;
533 }
534 }
535
536 // This is the case the fast-path above isn't provisioned to handle.
537 // The fast-path is designed to handle frequently arising cases in an efficient manner.
538 // (The fast-path is just a degenerate variant of the slow-path).
539 // Perform the dreaded state transition and pass control into the slow-path.
540 JRT_BLOCK;
541 Handle h_obj(current, obj);
542 ObjectSynchronizer::notify(h_obj, CHECK);
543 JRT_BLOCK_END;
544 JRT_END
545
546 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notifyAll_C, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))
547
548 if (!SafepointSynchronize::is_synchronizing()) {
549 if (ObjectSynchronizer::quick_notify(obj, current, true)) {
550 return;
551 }
552 }
553
554 // This is the case the fast-path above isn't provisioned to handle.
555 // The fast-path is designed to handle frequently arising cases in an efficient manner.
556 // (The fast-path is just a degenerate variant of the slow-path).
557 // Perform the dreaded state transition and pass control into the slow-path.
558 JRT_BLOCK;
559 Handle h_obj(current, obj);
560 ObjectSynchronizer::notifyall(h_obj, CHECK);
561 JRT_BLOCK_END;
562 JRT_END
563
564 JRT_ENTRY(void, OptoRuntime::vthread_end_first_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
565 MountUnmountDisabler::end_transition(current, vt, true /*is_mount*/, true /*is_thread_start*/);
566 JRT_END
567
568 JRT_ENTRY(void, OptoRuntime::vthread_start_final_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
569 java_lang_Thread::set_is_in_vthread_transition(vt, false);
570 current->set_is_in_vthread_transition(false);
571 MountUnmountDisabler::start_transition(current, vt, false /*is_mount*/, true /*is_thread_end*/);
572 JRT_END
573
574 JRT_ENTRY(void, OptoRuntime::vthread_start_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
575 java_lang_Thread::set_is_in_vthread_transition(vt, false);
576 current->set_is_in_vthread_transition(false);
577 MountUnmountDisabler::start_transition(current, vt, is_mount, false /*is_thread_end*/);
578 JRT_END
579
580 JRT_ENTRY(void, OptoRuntime::vthread_end_transition_C(oopDesc* vt, jboolean is_mount, JavaThread* current))
581 MountUnmountDisabler::end_transition(current, vt, is_mount, false /*is_thread_start*/);
582 JRT_END
583
584 static const TypeFunc* make_new_instance_Type() {
585 // create input type (domain)
586 const Type **fields = TypeTuple::fields(1);
587 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
588 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
589
590 // create result type (range)
591 fields = TypeTuple::fields(1);
592 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
593
594 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
595
596 return TypeFunc::make(domain, range);
597 }
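// A sketch of how the signature above lines up with its C entry point,
// OptoRuntime::new_instance_C(Klass* klass, JavaThread* current), defined
// earlier in this file: the domain describes only the Klass argument (the
// trailing JavaThread* is supplied by the generated stub rather than by the
// TypeFunc), and the raw-pointer result in the range reaches compiled code
// through the thread-local vm_result_oop slot after any possible GC, as
// described in the comments in new_instance_C.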
598
599 static const TypeFunc* make_vthread_transition_Type() {
600 // create input type (domain)
601 const Type **fields = TypeTuple::fields(2);
602 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
603 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean
604 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
605
606 // no result type needed
607 fields = TypeTuple::fields(1);
608 fields[TypeFunc::Parms+0] = nullptr; // void
609 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
610
611 return TypeFunc::make(domain,range);
612 }
613
614 static const TypeFunc* make_athrow_Type() {
615 // create input type (domain)
616 const Type **fields = TypeTuple::fields(1);
617 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // exception oop to throw
618 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
619
620 // create result type (range)
621 fields = TypeTuple::fields(0);
622
623 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
624
625 return TypeFunc::make(domain, range);
626 }
627
628 static const TypeFunc* make_new_array_Type() {
629 // create input type (domain)
630 const Type **fields = TypeTuple::fields(2);
631 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
632 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size
633 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
634
635 // create result type (range)
636 fields = TypeTuple::fields(1);
637 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
638
639 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
640
641 return TypeFunc::make(domain, range);
642 }
643
644 const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
645 // create input type (domain)
646 const int nargs = ndim + 1;
647 const Type **fields = TypeTuple::fields(nargs);
648 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
649 for( int i = 1; i < nargs; i++ )
650 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size
651 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);
652
653 // create result type (range)
654 fields = TypeTuple::fields(1);
655 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
656 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
657
658 return TypeFunc::make(domain, range);
659 }
660
661 static const TypeFunc* make_multianewarrayN_Type() {
662 // create input type (domain)
663 const Type **fields = TypeTuple::fields(2);
664 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
665 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
666 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
667
668 // create result type (range)
669 fields = TypeTuple::fields(1);
670 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
671 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
672
673 return TypeFunc::make(domain, range);
674 }
675
676 static const TypeFunc* make_uncommon_trap_Type() {
677 // create input type (domain)
678 const Type **fields = TypeTuple::fields(1);
679 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
680 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
681
682 // create result type (range)
683 fields = TypeTuple::fields(0);
684 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
685
686 return TypeFunc::make(domain, range);
687 }
688
689 //-----------------------------------------------------------------------------
690 // Monitor Handling
691
692 static const TypeFunc* make_complete_monitor_enter_Type() {
693 // create input type (domain)
694 const Type **fields = TypeTuple::fields(2);
695 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
696 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
697 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
698
699 // create result type (range)
700 fields = TypeTuple::fields(0);
701
702 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
703
704 return TypeFunc::make(domain,range);
705 }
706
707 //-----------------------------------------------------------------------------
708
709 static const TypeFunc* make_complete_monitor_exit_Type() {
710 // create input type (domain)
711 const Type **fields = TypeTuple::fields(3);
712 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
713 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock
714 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self)
715 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
716
717 // create result type (range)
718 fields = TypeTuple::fields(0);
719
720 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
721
722 return TypeFunc::make(domain, range);
723 }
724
725 static const TypeFunc* make_monitor_notify_Type() {
726 // create input type (domain)
727 const Type **fields = TypeTuple::fields(1);
728 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to notify
729 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
730
731 // create result type (range)
732 fields = TypeTuple::fields(0);
733 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
734 return TypeFunc::make(domain, range);
735 }
736
737 static const TypeFunc* make_flush_windows_Type() {
738 // create input type (domain)
739 const Type** fields = TypeTuple::fields(1);
740 fields[TypeFunc::Parms+0] = nullptr; // void
741 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
742
743 // create result type
744 fields = TypeTuple::fields(1);
745 fields[TypeFunc::Parms+0] = nullptr; // void
746 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
747
748 return TypeFunc::make(domain, range);
749 }
750
751 static const TypeFunc* make_l2f_Type() {
752 // create input type (domain)
753 const Type **fields = TypeTuple::fields(2);
754 fields[TypeFunc::Parms+0] = TypeLong::LONG;
755 fields[TypeFunc::Parms+1] = Type::HALF;
756 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
757
758 // create result type (range)
759 fields = TypeTuple::fields(1);
760 fields[TypeFunc::Parms+0] = Type::FLOAT;
761 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
762
763 return TypeFunc::make(domain, range);
764 }
765
766 static const TypeFunc* make_modf_Type() {
767 const Type **fields = TypeTuple::fields(2);
768 fields[TypeFunc::Parms+0] = Type::FLOAT;
769 fields[TypeFunc::Parms+1] = Type::FLOAT;
770 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
771
772 // create result type (range)
773 fields = TypeTuple::fields(1);
774 fields[TypeFunc::Parms+0] = Type::FLOAT;
775
776 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
777
778 return TypeFunc::make(domain, range);
779 }
780
781 static const TypeFunc* make_Math_D_D_Type() {
782 // create input type (domain)
783 const Type **fields = TypeTuple::fields(2);
784 // double argument (value plus Type::HALF)
785 fields[TypeFunc::Parms+0] = Type::DOUBLE;
786 fields[TypeFunc::Parms+1] = Type::HALF;
787 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
788
789 // create result type (range)
790 fields = TypeTuple::fields(2);
791 fields[TypeFunc::Parms+0] = Type::DOUBLE;
792 fields[TypeFunc::Parms+1] = Type::HALF;
793 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
794
795 return TypeFunc::make(domain, range);
796 }
797
798 const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
799 // create input type (domain)
800 const Type **fields = TypeTuple::fields(num_arg);
801 // vector arguments
802 assert(num_arg > 0, "must have at least 1 input");
803 for (uint i = 0; i < num_arg; i++) {
804 fields[TypeFunc::Parms+i] = in_type;
805 }
806 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);
807
808 // create result type (range)
809 const uint num_ret = 1;
810 fields = TypeTuple::fields(num_ret);
811 fields[TypeFunc::Parms+0] = out_type;
812 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);
813
814 return TypeFunc::make(domain, range);
815 }
816
817 static const TypeFunc* make_Math_DD_D_Type() {
818 const Type **fields = TypeTuple::fields(4);
819 fields[TypeFunc::Parms+0] = Type::DOUBLE;
820 fields[TypeFunc::Parms+1] = Type::HALF;
821 fields[TypeFunc::Parms+2] = Type::DOUBLE;
822 fields[TypeFunc::Parms+3] = Type::HALF;
823 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);
824
825 // create result type (range)
826 fields = TypeTuple::fields(2);
827 fields[TypeFunc::Parms+0] = Type::DOUBLE;
828 fields[TypeFunc::Parms+1] = Type::HALF;
829 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
830
831 return TypeFunc::make(domain, range);
832 }
833
834 //-------------- currentTimeMillis, currentTimeNanos, etc
835
836 static const TypeFunc* make_void_long_Type() {
837 // create input type (domain)
838 const Type **fields = TypeTuple::fields(0);
839 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
840
841 // create result type (range)
842 fields = TypeTuple::fields(2);
843 fields[TypeFunc::Parms+0] = TypeLong::LONG;
844 fields[TypeFunc::Parms+1] = Type::HALF;
845 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);
846
847 return TypeFunc::make(domain, range);
848 }
849
850 static const TypeFunc* make_void_void_Type() {
851 // create input type (domain)
852 const Type **fields = TypeTuple::fields(0);
853 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
854
855 // create result type (range)
856 fields = TypeTuple::fields(0);
857 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
858 return TypeFunc::make(domain, range);
859 }
860
861 static const TypeFunc* make_jfr_write_checkpoint_Type() {
862 // create input type (domain)
863 const Type **fields = TypeTuple::fields(0);
864 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
865
866 // create result type (range)
867 fields = TypeTuple::fields(1);
868 fields[TypeFunc::Parms] = TypeInstPtr::BOTTOM;
869 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 1, fields);
870 return TypeFunc::make(domain, range);
871 }
872
873
874 // Takes as parameters:
875 // void *dest
876 // long size
877 // uchar byte
878
879 static const TypeFunc* make_setmemory_Type() {
880 // create input type (domain)
881 int argcnt = NOT_LP64(3) LP64_ONLY(4);
882 const Type** fields = TypeTuple::fields(argcnt);
883 int argp = TypeFunc::Parms;
884 fields[argp++] = TypePtr::NOTNULL; // dest
885 fields[argp++] = TypeX_X; // size
886 LP64_ONLY(fields[argp++] = Type::HALF); // size
887 fields[argp++] = TypeInt::UBYTE; // bytevalue
888 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
889 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
890
891 // no result type needed
892 fields = TypeTuple::fields(1);
893 fields[TypeFunc::Parms+0] = nullptr; // void
894 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
895 return TypeFunc::make(domain, range);
896 }
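// Note on the Type::HALF entries used throughout these signatures: a long or
// double value always occupies two adjacent fields (the value followed by
// Type::HALF for its second half), while a size_t argument (TypeX_X) needs the
// extra HALF field only on LP64, hence the LP64_ONLY(Type::HALF) entries.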
897
898 // arraycopy stub variations:
899 enum ArrayCopyType {
900 ac_fast, // void(ptr, ptr, size_t)
901 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr)
902 ac_slow, // void(ptr, int, ptr, int, int)
903 ac_generic // int(ptr, int, ptr, int, int)
904 };
905
906 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
907 // create input type (domain)
908 int num_args = (act == ac_fast ? 3 : 5);
909 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
910 int argcnt = num_args;
911 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
912 const Type** fields = TypeTuple::fields(argcnt);
913 int argp = TypeFunc::Parms;
914 fields[argp++] = TypePtr::NOTNULL; // src
915 if (num_size_args == 0) {
916 fields[argp++] = TypeInt::INT; // src_pos
917 }
918 fields[argp++] = TypePtr::NOTNULL; // dest
919 if (num_size_args == 0) {
920 fields[argp++] = TypeInt::INT; // dest_pos
921 fields[argp++] = TypeInt::INT; // length
922 }
923 while (num_size_args-- > 0) {
924 fields[argp++] = TypeX_X; // size in whatevers (size_t)
925 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
926 }
927 if (act == ac_checkcast) {
928 fields[argp++] = TypePtr::NOTNULL; // super_klass
929 }
930 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
931 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
932
933 // create result type if needed
934 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
935 fields = TypeTuple::fields(1);
936 if (retcnt == 0)
937 fields[TypeFunc::Parms+0] = nullptr; // void
938 else
939 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
940 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
941 return TypeFunc::make(domain, range);
942 }
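// Worked example, derived from the code above: for ac_checkcast on LP64,
// argcnt is 7 and the domain is { src (ptr), dest (ptr), two size_t arguments
// (each followed by Type::HALF), super_klass (ptr) }, with a single int status
// in the range -- matching the int(ptr, ptr, size_t, size_t, ptr) shape listed
// in the enum above.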
943
944 static const TypeFunc* make_array_fill_Type() {
945 const Type** fields;
946 int argp = TypeFunc::Parms;
947 // create input type (domain): pointer, int, size_t
948 fields = TypeTuple::fields(3 LP64_ONLY( + 1));
949 fields[argp++] = TypePtr::NOTNULL;
950 fields[argp++] = TypeInt::INT;
951 fields[argp++] = TypeX_X; // size in whatevers (size_t)
952 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
953 const TypeTuple *domain = TypeTuple::make(argp, fields);
954
955 // create result type
956 fields = TypeTuple::fields(1);
957 fields[TypeFunc::Parms+0] = nullptr; // void
958 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
959
960 return TypeFunc::make(domain, range);
961 }
962
963 static const TypeFunc* make_array_partition_Type() {
964 // create input type (domain)
965 int num_args = 7;
966 int argcnt = num_args;
967 const Type** fields = TypeTuple::fields(argcnt);
968 int argp = TypeFunc::Parms;
969 fields[argp++] = TypePtr::NOTNULL; // array
970 fields[argp++] = TypeInt::INT; // element type
971 fields[argp++] = TypeInt::INT; // low
972 fields[argp++] = TypeInt::INT; // end
973 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array)
974 fields[argp++] = TypeInt::INT; // indexPivot1
975 fields[argp++] = TypeInt::INT; // indexPivot2
976 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
977 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
978
979 // no result type needed
980 fields = TypeTuple::fields(1);
981 fields[TypeFunc::Parms+0] = nullptr; // void
982 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
983 return TypeFunc::make(domain, range);
984 }
985
986 static const TypeFunc* make_array_sort_Type() {
987 // create input type (domain)
988 int num_args = 4;
989 int argcnt = num_args;
990 const Type** fields = TypeTuple::fields(argcnt);
991 int argp = TypeFunc::Parms;
992 fields[argp++] = TypePtr::NOTNULL; // array
993 fields[argp++] = TypeInt::INT; // element type
994 fields[argp++] = TypeInt::INT; // fromIndex
995 fields[argp++] = TypeInt::INT; // toIndex
996 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
997 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
998
999 // no result type needed
1000 fields = TypeTuple::fields(1);
1001 fields[TypeFunc::Parms+0] = nullptr; // void
1002 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1003 return TypeFunc::make(domain, range);
1004 }
1005
1006 static const TypeFunc* make_aescrypt_block_Type() {
1007 // create input type (domain)
1008 int num_args = 3;
1009 int argcnt = num_args;
1010 const Type** fields = TypeTuple::fields(argcnt);
1011 int argp = TypeFunc::Parms;
1012 fields[argp++] = TypePtr::NOTNULL; // src
1013 fields[argp++] = TypePtr::NOTNULL; // dest
1014 fields[argp++] = TypePtr::NOTNULL; // k array
1015 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1016 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1017
1018 // no result type needed
1019 fields = TypeTuple::fields(1);
1020 fields[TypeFunc::Parms+0] = nullptr; // void
1021 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1022 return TypeFunc::make(domain, range);
1023 }
1024
1025 static const TypeFunc* make_updateBytesCRC32_Type() {
1026 // create input type (domain)
1027 int num_args = 3;
1028 int argcnt = num_args;
1029 const Type** fields = TypeTuple::fields(argcnt);
1030 int argp = TypeFunc::Parms;
1031 fields[argp++] = TypeInt::INT; // crc
1032 fields[argp++] = TypePtr::NOTNULL; // src
1033 fields[argp++] = TypeInt::INT; // len
1034 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1035 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1036
1037 // result type needed
1038 fields = TypeTuple::fields(1);
1039 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1040 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1041 return TypeFunc::make(domain, range);
1042 }
1043
1044 static const TypeFunc* make_updateBytesCRC32C_Type() {
1045 // create input type (domain)
1046 int num_args = 4;
1047 int argcnt = num_args;
1048 const Type** fields = TypeTuple::fields(argcnt);
1049 int argp = TypeFunc::Parms;
1050 fields[argp++] = TypeInt::INT; // crc
1051 fields[argp++] = TypePtr::NOTNULL; // buf
1052 fields[argp++] = TypeInt::INT; // len
1053 fields[argp++] = TypePtr::NOTNULL; // table
1054 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1055 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1056
1057 // result type needed
1058 fields = TypeTuple::fields(1);
1059 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1060 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1061 return TypeFunc::make(domain, range);
1062 }
1063
1064 static const TypeFunc* make_updateBytesAdler32_Type() {
1065 // create input type (domain)
1066 int num_args = 3;
1067 int argcnt = num_args;
1068 const Type** fields = TypeTuple::fields(argcnt);
1069 int argp = TypeFunc::Parms;
1070 fields[argp++] = TypeInt::INT; // crc
1071 fields[argp++] = TypePtr::NOTNULL; // src + offset
1072 fields[argp++] = TypeInt::INT; // len
1073 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1074 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1075
1076 // result type needed
1077 fields = TypeTuple::fields(1);
1078 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
1079 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1080 return TypeFunc::make(domain, range);
1081 }
1082
1083 static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
1084 // create input type (domain)
1085 int num_args = 5;
1086 int argcnt = num_args;
1087 const Type** fields = TypeTuple::fields(argcnt);
1088 int argp = TypeFunc::Parms;
1089 fields[argp++] = TypePtr::NOTNULL; // src
1090 fields[argp++] = TypePtr::NOTNULL; // dest
1091 fields[argp++] = TypePtr::NOTNULL; // k array
1092 fields[argp++] = TypePtr::NOTNULL; // r array
1093 fields[argp++] = TypeInt::INT; // src len
1094 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1095 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1096
1097 // returning cipher len (int)
1098 fields = TypeTuple::fields(1);
1099 fields[TypeFunc::Parms+0] = TypeInt::INT;
1100 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1101 return TypeFunc::make(domain, range);
1102 }
1103
1104 static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
1105 // create input type (domain)
1106 int num_args = 4;
1107 int argcnt = num_args;
1108 const Type** fields = TypeTuple::fields(argcnt);
1109 int argp = TypeFunc::Parms;
1110 fields[argp++] = TypePtr::NOTNULL; // src
1111 fields[argp++] = TypePtr::NOTNULL; // dest
1112 fields[argp++] = TypePtr::NOTNULL; // k array
1113 fields[argp++] = TypeInt::INT; // src len
1114 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1115 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1116
1117 // returning cipher len (int)
1118 fields = TypeTuple::fields(1);
1119 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1120 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1121 return TypeFunc::make(domain, range);
1122 }
1123
1124 static const TypeFunc* make_counterMode_aescrypt_Type() {
1125 // create input type (domain)
1126 int num_args = 7;
1127 int argcnt = num_args;
1128 const Type** fields = TypeTuple::fields(argcnt);
1129 int argp = TypeFunc::Parms;
1130 fields[argp++] = TypePtr::NOTNULL; // src
1131 fields[argp++] = TypePtr::NOTNULL; // dest
1132 fields[argp++] = TypePtr::NOTNULL; // k array
1133 fields[argp++] = TypePtr::NOTNULL; // counter array
1134 fields[argp++] = TypeInt::INT; // src len
1135 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter
1136 fields[argp++] = TypePtr::NOTNULL; // saved used addr
1137 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1138 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1139 // returning cipher len (int)
1140 fields = TypeTuple::fields(1);
1141 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1142 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1143 return TypeFunc::make(domain, range);
1144 }
1145
1146 static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
1147 // create input type (domain)
1148 int num_args = 8;
1149 int argcnt = num_args;
1150 const Type** fields = TypeTuple::fields(argcnt);
1151 int argp = TypeFunc::Parms;
1152 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs
1153 fields[argp++] = TypeInt::INT; // int len
1154 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs
1155 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs
1156 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj
1157 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj
1158 fields[argp++] = TypePtr::NOTNULL; // long[] subkeyHtbl from GHASH obj
1159 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj
1160
1161 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1162 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1163 // returning cipher len (int)
1164 fields = TypeTuple::fields(1);
1165 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1166 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1167 return TypeFunc::make(domain, range);
1168 }
1169
1170 static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
1171 // create input type (domain)
1172 int num_args = is_sha3 ? 3 : 2;
1173 int argcnt = num_args;
1174 const Type** fields = TypeTuple::fields(argcnt);
1175 int argp = TypeFunc::Parms;
1176 fields[argp++] = TypePtr::NOTNULL; // buf
1177 fields[argp++] = TypePtr::NOTNULL; // state
1178 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1179 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1180 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1181
1182 // no result type needed
1183 fields = TypeTuple::fields(1);
1184 fields[TypeFunc::Parms+0] = nullptr; // void
1185 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1186 return TypeFunc::make(domain, range);
1187 }
1188
1189 /*
1190 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
1191 */
1192 static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
1193 // create input type (domain)
1194 int num_args = is_sha3 ? 5 : 4;
1195 int argcnt = num_args;
1196 const Type** fields = TypeTuple::fields(argcnt);
1197 int argp = TypeFunc::Parms;
1198 fields[argp++] = TypePtr::NOTNULL; // buf
1199 fields[argp++] = TypePtr::NOTNULL; // state
1200 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size
1201 fields[argp++] = TypeInt::INT; // ofs
1202 fields[argp++] = TypeInt::INT; // limit
1203 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1204 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1205
1206 // returning ofs (int)
1207 fields = TypeTuple::fields(1);
1208 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
1209 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1210 return TypeFunc::make(domain, range);
1211 }
1212
1213 // SHAKE128Parallel doubleKeccak function
1214 static const TypeFunc* make_double_keccak_Type() {
1215 int argcnt = 2;
1216
1217 const Type** fields = TypeTuple::fields(argcnt);
1218 int argp = TypeFunc::Parms;
1219 fields[argp++] = TypePtr::NOTNULL; // status0
1220 fields[argp++] = TypePtr::NOTNULL; // status1
1221
1222 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1223 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1224
1225 // result type needed
1226 fields = TypeTuple::fields(1);
1227 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1228 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1229 return TypeFunc::make(domain, range);
1230 }
1231
1232 static const TypeFunc* make_multiplyToLen_Type() {
1233 // create input type (domain)
1234 int num_args = 5;
1235 int argcnt = num_args;
1236 const Type** fields = TypeTuple::fields(argcnt);
1237 int argp = TypeFunc::Parms;
1238 fields[argp++] = TypePtr::NOTNULL; // x
1239 fields[argp++] = TypeInt::INT; // xlen
1240 fields[argp++] = TypePtr::NOTNULL; // y
1241 fields[argp++] = TypeInt::INT; // ylen
1242 fields[argp++] = TypePtr::NOTNULL; // z
1243 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1244 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1245
1246 // no result type needed
1247 fields = TypeTuple::fields(1);
1248 fields[TypeFunc::Parms+0] = nullptr;
1249 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1250 return TypeFunc::make(domain, range);
1251 }
1252
1253 static const TypeFunc* make_squareToLen_Type() {
1254 // create input type (domain)
1255 int num_args = 4;
1256 int argcnt = num_args;
1257 const Type** fields = TypeTuple::fields(argcnt);
1258 int argp = TypeFunc::Parms;
1259 fields[argp++] = TypePtr::NOTNULL; // x
1260 fields[argp++] = TypeInt::INT; // len
1261 fields[argp++] = TypePtr::NOTNULL; // z
1262 fields[argp++] = TypeInt::INT; // zlen
1263 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1264 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1265
1266 // no result type needed
1267 fields = TypeTuple::fields(1);
1268 fields[TypeFunc::Parms+0] = nullptr;
1269 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1270 return TypeFunc::make(domain, range);
1271 }
1272
1273 static const TypeFunc* make_mulAdd_Type() {
1274 // create input type (domain)
1275 int num_args = 5;
1276 int argcnt = num_args;
1277 const Type** fields = TypeTuple::fields(argcnt);
1278 int argp = TypeFunc::Parms;
1279 fields[argp++] = TypePtr::NOTNULL; // out
1280 fields[argp++] = TypePtr::NOTNULL; // in
1281 fields[argp++] = TypeInt::INT; // offset
1282 fields[argp++] = TypeInt::INT; // len
1283 fields[argp++] = TypeInt::INT; // k
1284 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1285 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1286
1287 // returning carry (int)
1288 fields = TypeTuple::fields(1);
1289 fields[TypeFunc::Parms+0] = TypeInt::INT;
1290 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
1291 return TypeFunc::make(domain, range);
1292 }
1293
1294 static const TypeFunc* make_montgomeryMultiply_Type() {
1295 // create input type (domain)
1296 int num_args = 7;
1297 int argcnt = num_args;
1298 const Type** fields = TypeTuple::fields(argcnt);
1299 int argp = TypeFunc::Parms;
1300 fields[argp++] = TypePtr::NOTNULL; // a
1301 fields[argp++] = TypePtr::NOTNULL; // b
1302 fields[argp++] = TypePtr::NOTNULL; // n
1303 fields[argp++] = TypeInt::INT; // len
1304 fields[argp++] = TypeLong::LONG; // inv
1305 fields[argp++] = Type::HALF;
1306 fields[argp++] = TypePtr::NOTNULL; // result
1307 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1308 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1309
1310 // result type needed
1311 fields = TypeTuple::fields(1);
1312 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1313
1314 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1315 return TypeFunc::make(domain, range);
1316 }
1317
1318 static const TypeFunc* make_montgomerySquare_Type() {
1319 // create input type (domain)
1320 int num_args = 6;
1321 int argcnt = num_args;
1322 const Type** fields = TypeTuple::fields(argcnt);
1323 int argp = TypeFunc::Parms;
1324 fields[argp++] = TypePtr::NOTNULL; // a
1325 fields[argp++] = TypePtr::NOTNULL; // n
1326 fields[argp++] = TypeInt::INT; // len
1327 fields[argp++] = TypeLong::LONG; // inv
1328 fields[argp++] = Type::HALF;
1329 fields[argp++] = TypePtr::NOTNULL; // result
1330 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1331 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1332
1333 // result type needed
1334 fields = TypeTuple::fields(1);
1335 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
1336
1337 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1338 return TypeFunc::make(domain, range);
1339 }
1340
1341 static const TypeFunc* make_bigIntegerShift_Type() {
1342 int argcnt = 5;
1343 const Type** fields = TypeTuple::fields(argcnt);
1344 int argp = TypeFunc::Parms;
1345 fields[argp++] = TypePtr::NOTNULL; // newArr
1346 fields[argp++] = TypePtr::NOTNULL; // oldArr
1347 fields[argp++] = TypeInt::INT; // newIdx
1348 fields[argp++] = TypeInt::INT; // shiftCount
1349 fields[argp++] = TypeInt::INT; // numIter
1350 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1351 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1352
1353 // no result type needed
1354 fields = TypeTuple::fields(1);
1355 fields[TypeFunc::Parms + 0] = nullptr;
1356 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1357 return TypeFunc::make(domain, range);
1358 }
1359
1360 static const TypeFunc* make_vectorizedMismatch_Type() {
1361 // create input type (domain)
1362 int num_args = 4;
1363 int argcnt = num_args;
1364 const Type** fields = TypeTuple::fields(argcnt);
1365 int argp = TypeFunc::Parms;
1366 fields[argp++] = TypePtr::NOTNULL; // obja
1367 fields[argp++] = TypePtr::NOTNULL; // objb
1368 fields[argp++] = TypeInt::INT; // length, number of elements
1369 fields[argp++] = TypeInt::INT; // log2scale, element size
1370 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1371 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1372
1373 // return mismatch index (int)
1374 fields = TypeTuple::fields(1);
1375 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1376 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1377 return TypeFunc::make(domain, range);
1378 }
1379
1380 static const TypeFunc* make_ghash_processBlocks_Type() {
1381 int argcnt = 4;
1382
1383 const Type** fields = TypeTuple::fields(argcnt);
1384 int argp = TypeFunc::Parms;
1385 fields[argp++] = TypePtr::NOTNULL; // state
1386 fields[argp++] = TypePtr::NOTNULL; // subkeyH
1387 fields[argp++] = TypePtr::NOTNULL; // data
1388 fields[argp++] = TypeInt::INT; // blocks
1389 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1390 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1391
1392 // no result type needed
1393 fields = TypeTuple::fields(1);
1394 fields[TypeFunc::Parms+0] = nullptr; // void
1395 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1396 return TypeFunc::make(domain, range);
1397 }
1398
1399 static const TypeFunc* make_chacha20Block_Type() {
1400 int argcnt = 2;
1401
1402 const Type** fields = TypeTuple::fields(argcnt);
1403 int argp = TypeFunc::Parms;
1404 fields[argp++] = TypePtr::NOTNULL; // state
1405 fields[argp++] = TypePtr::NOTNULL; // result
1406
1407 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1408 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1409
1410 // result type needed
1411 fields = TypeTuple::fields(1);
1412 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
1413 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1414 return TypeFunc::make(domain, range);
1415 }
1416
1417 // Kyber NTT function
1418 static const TypeFunc* make_kyberNtt_Type() {
1419 int argcnt = 2;
1420
1421 const Type** fields = TypeTuple::fields(argcnt);
1422 int argp = TypeFunc::Parms;
1423 fields[argp++] = TypePtr::NOTNULL; // coeffs
1424 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1425
1426 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1427 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1428
1429 // result type needed
1430 fields = TypeTuple::fields(1);
1431 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1432 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1433 return TypeFunc::make(domain, range);
1434 }
1435
1436 // Kyber inverse NTT function
1437 static const TypeFunc* make_kyberInverseNtt_Type() {
1438 int argcnt = 2;
1439
1440 const Type** fields = TypeTuple::fields(argcnt);
1441 int argp = TypeFunc::Parms;
1442 fields[argp++] = TypePtr::NOTNULL; // coeffs
1443 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1444
1445 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1446 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1447
1448 // result type needed
1449 fields = TypeTuple::fields(1);
1450 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1451 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1452 return TypeFunc::make(domain, range);
1453 }
1454
1455 // Kyber NTT multiply function
1456 static const TypeFunc* make_kyberNttMult_Type() {
1457 int argcnt = 4;
1458
1459 const Type** fields = TypeTuple::fields(argcnt);
1460 int argp = TypeFunc::Parms;
1461 fields[argp++] = TypePtr::NOTNULL; // result
1462 fields[argp++] = TypePtr::NOTNULL; // ntta
1463 fields[argp++] = TypePtr::NOTNULL; // nttb
1464 fields[argp++] = TypePtr::NOTNULL; // NTT multiply zetas
1465
1466 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1467 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1468
1469 // result type needed
1470 fields = TypeTuple::fields(1);
1471 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1472 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1473 return TypeFunc::make(domain, range);
1474 }
1475
1476 // Kyber add 2 polynomials function
1477 static const TypeFunc* make_kyberAddPoly_2_Type() {
1478 int argcnt = 3;
1479
1480 const Type** fields = TypeTuple::fields(argcnt);
1481 int argp = TypeFunc::Parms;
1482 fields[argp++] = TypePtr::NOTNULL; // result
1483 fields[argp++] = TypePtr::NOTNULL; // a
1484 fields[argp++] = TypePtr::NOTNULL; // b
1485
1486 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1487 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1488
1489 // result type needed
1490 fields = TypeTuple::fields(1);
1491 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1492 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1493 return TypeFunc::make(domain, range);
1494 }
1495
1496
1497 // Kyber add 3 polynomials function
1498 static const TypeFunc* make_kyberAddPoly_3_Type() {
1499 int argcnt = 4;
1500
1501 const Type** fields = TypeTuple::fields(argcnt);
1502 int argp = TypeFunc::Parms;
1503 fields[argp++] = TypePtr::NOTNULL; // result
1504 fields[argp++] = TypePtr::NOTNULL; // a
1505 fields[argp++] = TypePtr::NOTNULL; // b
1506 fields[argp++] = TypePtr::NOTNULL; // c
1507
1508 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1509 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1510
1511 // result type needed
1512 fields = TypeTuple::fields(1);
1513 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1514 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1515 return TypeFunc::make(domain, range);
1516 }
1517
1518
1519 // Kyber XOF output parsing into polynomial coefficient candidates,
1520 // or the decompress(12,...) function
1521 static const TypeFunc* make_kyber12To16_Type() {
1522 int argcnt = 4;
1523
1524 const Type** fields = TypeTuple::fields(argcnt);
1525 int argp = TypeFunc::Parms;
1526 fields[argp++] = TypePtr::NOTNULL; // condensed
1527 fields[argp++] = TypeInt::INT; // condensedOffs
1528 fields[argp++] = TypePtr::NOTNULL; // parsed
1529 fields[argp++] = TypeInt::INT; // parsedLength
1530
1531 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1532 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1533
1534 // result type needed
1535 fields = TypeTuple::fields(1);
1536 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1537 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1538 return TypeFunc::make(domain, range);
1539 }
1540
1541 // Kyber Barrett reduce function
1542 static const TypeFunc* make_kyberBarrettReduce_Type() {
1543 int argcnt = 1;
1544
1545 const Type** fields = TypeTuple::fields(argcnt);
1546 int argp = TypeFunc::Parms;
1547 fields[argp++] = TypePtr::NOTNULL; // coeffs
1548
1549 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1550 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1551
1552 // result type needed
1553 fields = TypeTuple::fields(1);
1554 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1555 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1556 return TypeFunc::make(domain, range);
1557 }
1558
1559 // Dilithium NTT function except for the final "normalization" to |coeff| < Q
1560 static const TypeFunc* make_dilithiumAlmostNtt_Type() {
1561 int argcnt = 2;
1562
1563 const Type** fields = TypeTuple::fields(argcnt);
1564 int argp = TypeFunc::Parms;
1565 fields[argp++] = TypePtr::NOTNULL; // coeffs
1566 fields[argp++] = TypePtr::NOTNULL; // NTT zetas
1567
1568 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1569 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1570
1571 // result type needed
1572 fields = TypeTuple::fields(1);
1573 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1574 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1575 return TypeFunc::make(domain, range);
1576 }
1577
1578 // Dilithium inverse NTT function except for the final mod Q division by 2^256
1579 static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() {
1580 int argcnt = 2;
1581
1582 const Type** fields = TypeTuple::fields(argcnt);
1583 int argp = TypeFunc::Parms;
1584 fields[argp++] = TypePtr::NOTNULL; // coeffs
1585 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas
1586
1587 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1588 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1589
1590 // result type needed
1591 fields = TypeTuple::fields(1);
1592 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1593 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1594 return TypeFunc::make(domain, range);
1595 }
1596
1597 // Dilithium NTT multiply function
1598 static const TypeFunc* make_dilithiumNttMult_Type() {
1599 int argcnt = 3;
1600
1601 const Type** fields = TypeTuple::fields(argcnt);
1602 int argp = TypeFunc::Parms;
1603 fields[argp++] = TypePtr::NOTNULL; // result
1604 fields[argp++] = TypePtr::NOTNULL; // ntta
1605 fields[argp++] = TypePtr::NOTNULL; // nttb
1606
1607 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1608 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1609
1610 // result type needed
1611 fields = TypeTuple::fields(1);
1612 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1613 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1614 return TypeFunc::make(domain, range);
1615 }
1616
1617 // Dilithium Montgomery multiply a polynomial coefficient array by a constant
1618 static const TypeFunc* make_dilithiumMontMulByConstant_Type() {
1619 int argcnt = 2;
1620
1621 const Type** fields = TypeTuple::fields(argcnt);
1622 int argp = TypeFunc::Parms;
1623 fields[argp++] = TypePtr::NOTNULL; // coeffs
1624 fields[argp++] = TypeInt::INT; // constant multiplier
1625
1626 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1627 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1628
1629 // result type needed
1630 fields = TypeTuple::fields(1);
1631 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1632 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1633 return TypeFunc::make(domain, range);
1634 }
1635
1636 // Dilithium decompose polynomial
1637 static const TypeFunc* make_dilithiumDecomposePoly_Type() {
1638 int argcnt = 5;
1639
1640 const Type** fields = TypeTuple::fields(argcnt);
1641 int argp = TypeFunc::Parms;
1642 fields[argp++] = TypePtr::NOTNULL; // input
1643 fields[argp++] = TypePtr::NOTNULL; // lowPart
1644 fields[argp++] = TypePtr::NOTNULL; // highPart
1645 fields[argp++] = TypeInt::INT; // 2 * gamma2
1646 fields[argp++] = TypeInt::INT; // multiplier
1647
1648 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1649 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
1650
1651 // result type needed
1652 fields = TypeTuple::fields(1);
1653 fields[TypeFunc::Parms + 0] = TypeInt::INT;
1654 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1655 return TypeFunc::make(domain, range);
1656 }
1657
1658 static const TypeFunc* make_base64_encodeBlock_Type() {
1659 int argcnt = 6;
1660
1661 const Type** fields = TypeTuple::fields(argcnt);
1662 int argp = TypeFunc::Parms;
1663 fields[argp++] = TypePtr::NOTNULL; // src array
1664 fields[argp++] = TypeInt::INT; // offset
1665 fields[argp++] = TypeInt::INT; // length
1666 fields[argp++] = TypePtr::NOTNULL; // dest array
1667 fields[argp++] = TypeInt::INT; // dp
1668 fields[argp++] = TypeInt::BOOL; // isURL
1669 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1670 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1671
1672 // no result type needed
1673 fields = TypeTuple::fields(1);
1674 fields[TypeFunc::Parms + 0] = nullptr; // void
1675 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1676 return TypeFunc::make(domain, range);
1677 }
1678
1679 static const TypeFunc* make_string_IndexOf_Type() {
1680 int argcnt = 4;
1681
1682 const Type** fields = TypeTuple::fields(argcnt);
1683 int argp = TypeFunc::Parms;
1684 fields[argp++] = TypePtr::NOTNULL; // haystack array
1685 fields[argp++] = TypeInt::INT; // haystack length
1686 fields[argp++] = TypePtr::NOTNULL; // needle array
1687 fields[argp++] = TypeInt::INT; // needle length
1688 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1689 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1690
1691 // result type needed
1692 fields = TypeTuple::fields(1);
1693 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
1694 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1695 return TypeFunc::make(domain, range);
1696 }
1697
1698 static const TypeFunc* make_base64_decodeBlock_Type() {
1699 int argcnt = 7;
1700
1701 const Type** fields = TypeTuple::fields(argcnt);
1702 int argp = TypeFunc::Parms;
1703 fields[argp++] = TypePtr::NOTNULL; // src array
1704 fields[argp++] = TypeInt::INT; // src offset
1705 fields[argp++] = TypeInt::INT; // src length
1706 fields[argp++] = TypePtr::NOTNULL; // dest array
1707 fields[argp++] = TypeInt::INT; // dest offset
1708 fields[argp++] = TypeInt::BOOL; // isURL
1709 fields[argp++] = TypeInt::BOOL; // isMIME
1710 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1711 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1712
1713 // result type needed
1714 fields = TypeTuple::fields(1);
1715 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
1716 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
1717 return TypeFunc::make(domain, range);
1718 }
1719
1720 static const TypeFunc* make_poly1305_processBlocks_Type() {
1721 int argcnt = 4;
1722
1723 const Type** fields = TypeTuple::fields(argcnt);
1724 int argp = TypeFunc::Parms;
1725 fields[argp++] = TypePtr::NOTNULL; // input array
1726 fields[argp++] = TypeInt::INT; // input length
1727 fields[argp++] = TypePtr::NOTNULL; // accumulator array
1728 fields[argp++] = TypePtr::NOTNULL; // r array
1729 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1730 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1731
1732 // no result type needed
1733 fields = TypeTuple::fields(1);
1734 fields[TypeFunc::Parms + 0] = nullptr; // void
1735 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1736 return TypeFunc::make(domain, range);
1737 }
1738
1739 static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
1740 int argcnt = 3;
1741
1742 const Type** fields = TypeTuple::fields(argcnt);
1743 int argp = TypeFunc::Parms;
1744 fields[argp++] = TypePtr::NOTNULL; // a array
1745 fields[argp++] = TypePtr::NOTNULL; // b array
1746 fields[argp++] = TypePtr::NOTNULL; // r(esult) array
1747 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1748 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1749
1750 // no result type needed
1751 fields = TypeTuple::fields(1);
1752 fields[TypeFunc::Parms + 0] = nullptr; // void
1753 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1754 return TypeFunc::make(domain, range);
1755 }
1756
1757 static const TypeFunc* make_intpoly_assign_Type() {
1758 int argcnt = 4;
1759
1760 const Type** fields = TypeTuple::fields(argcnt);
1761 int argp = TypeFunc::Parms;
1762 fields[argp++] = TypeInt::INT; // set flag
1763 fields[argp++] = TypePtr::NOTNULL; // a array (result)
1764 fields[argp++] = TypePtr::NOTNULL; // b array (if set is set)
1765 fields[argp++] = TypeInt::INT; // array length
1766 assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
1767 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1768
1769 // no result type needed
1770 fields = TypeTuple::fields(1);
1771 fields[TypeFunc::Parms + 0] = nullptr; // void
1772 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1773 return TypeFunc::make(domain, range);
1774 }
1775
1776 //------------- Interpreter state for on stack replacement
1777 static const TypeFunc* make_osr_end_Type() {
1778 // create input type (domain)
1779 const Type **fields = TypeTuple::fields(1);
1780 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
1781 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
1782
1783 // create result type
1784 fields = TypeTuple::fields(1);
1785 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
1786 fields[TypeFunc::Parms+0] = nullptr; // void
1787 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
1788 return TypeFunc::make(domain, range);
1789 }
1790
1791 #ifndef PRODUCT
1792 static void debug_print_convert_type(const Type** fields, int* argp, Node *parm) {
1793 const BasicType bt = parm->bottom_type()->basic_type();
1794 fields[(*argp)++] = Type::get_const_basic_type(bt);
1795 if (bt == T_LONG || bt == T_DOUBLE) {
1796 fields[(*argp)++] = Type::HALF;
1797 }
1798 }
1799
1800 static void update_arg_cnt(const Node* parm, int* arg_cnt) {
1801 (*arg_cnt)++;
1802 const BasicType bt = parm->bottom_type()->basic_type();
1803 if (bt == T_LONG || bt == T_DOUBLE) {
1804 (*arg_cnt)++;
1805 }
1806 }
1807
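// Build the TypeFunc for a debug-print runtime call. The first slot is the
// static string pointer; each non-null parm contributes one more slot
// (two for longs and doubles, via Type::HALF). The nested ifs below simply
// stop counting at the first null parameter.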
1808 const TypeFunc* OptoRuntime::debug_print_Type(Node* parm0, Node* parm1,
1809 Node* parm2, Node* parm3,
1810 Node* parm4, Node* parm5,
1811 Node* parm6) {
1812 int argcnt = 1;
1813 if (parm0 != nullptr) { update_arg_cnt(parm0, &argcnt);
1814 if (parm1 != nullptr) { update_arg_cnt(parm1, &argcnt);
1815 if (parm2 != nullptr) { update_arg_cnt(parm2, &argcnt);
1816 if (parm3 != nullptr) { update_arg_cnt(parm3, &argcnt);
1817 if (parm4 != nullptr) { update_arg_cnt(parm4, &argcnt);
1818 if (parm5 != nullptr) { update_arg_cnt(parm5, &argcnt);
1819 if (parm6 != nullptr) { update_arg_cnt(parm6, &argcnt);
1820 /* close each nested if ===> */ } } } } } } }
1821
1822 // create input type (domain)
1823 const Type** fields = TypeTuple::fields(argcnt);
1824 int argp = TypeFunc::Parms;
1825 fields[argp++] = TypePtr::NOTNULL; // static string pointer
1826
1827 if (parm0 != nullptr) { debug_print_convert_type(fields, &argp, parm0);
1828 if (parm1 != nullptr) { debug_print_convert_type(fields, &argp, parm1);
1829 if (parm2 != nullptr) { debug_print_convert_type(fields, &argp, parm2);
1830 if (parm3 != nullptr) { debug_print_convert_type(fields, &argp, parm3);
1831 if (parm4 != nullptr) { debug_print_convert_type(fields, &argp, parm4);
1832 if (parm5 != nullptr) { debug_print_convert_type(fields, &argp, parm5);
1833 if (parm6 != nullptr) { debug_print_convert_type(fields, &argp, parm6);
1834 /* close each nested if ===> */ } } } } } } }
1835
1836 assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
1837 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
1838
1839 // no result type needed
1840 fields = TypeTuple::fields(1);
1841 fields[TypeFunc::Parms+0] = nullptr; // void
1842 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
1843 return TypeFunc::make(domain, range);
1844 }
1845 #endif // PRODUCT
1846
1847 //-------------------------------------------------------------------------------------
1848 // register policy
1849
1850 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
1851 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
1852 switch (register_save_policy[reg]) {
1853 case 'C': return false; // SOC: save-on-call (caller-saved)
1854 case 'E': return true ; // SOE: save-on-entry (callee-saved)
1855 case 'N': return false; // NS:  never saved
1856 case 'A': return false; // AS:  always saved
1857 }
1858 ShouldNotReachHere();
1859 return false;
1860 }
1861
1862 //-----------------------------------------------------------------------
1863 // Exceptions
1864 //
1865
1866 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);
1867
1868 // This method is an entry point that is always called from a C++ method, never
1869 // directly from compiled code. Compiled code calls the C++ method that follows.
1870 // We cannot allow an async exception to be installed during exception processing.
1871 JRT_ENTRY_NO_ASYNC_PROF(address, OptoRuntime, handle_exception_C_helper, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
1872 // The frame we rethrow the exception to might not have been processed by the GC yet.
1873 // The stack watermark barrier takes care of detecting that and ensuring the frame
1874 // has updated oops.
1875 StackWatermarkSet::after_unwind(current);
1876
1877 // Do not confuse exception_oop with pending_exception. The exception_oop
1878 // is only used to pass arguments into this method, not for general
1879 // exception handling. DO NOT CHANGE IT to use pending_exception, since
1880 // the runtime stubs check this on exit.
1881 assert(current->exception_oop() != nullptr, "exception oop is found");
1882 address handler_address = nullptr;
1883
1884 Handle exception(current, current->exception_oop());
1885 address pc = current->exception_pc();
1886
1887 // Clear out the exception oop and pc since looking up an
1888 // exception handler can cause class loading, which might throw an
1889 // exception and those fields are expected to be clear during
1890 // normal bytecode execution.
1891 current->clear_exception_oop_and_pc();
1892
1893 LogTarget(Info, exceptions) lt;
1894 if (lt.is_enabled()) {
1895 LogStream ls(lt);
1896 trace_exception(&ls, exception(), pc, "");
1897 }
1898
1899 // for AbortVMOnException flag
1900 Exceptions::debug_check_abort(exception);
1901
1902 #ifdef ASSERT
1903 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
1904 // should throw an exception here
1905 ShouldNotReachHere();
1906 }
1907 #endif
1908
1909 // New exception handling: this method is entered only from adapters;
1910 // exceptions from compiled Java methods are handled in compiled code
1911 // using the rethrow node.
1912
1913 nm = CodeCache::find_nmethod(pc);
1914 assert(nm != nullptr, "No NMethod found");
1915 if (nm->is_native_method()) {
1916 fatal("Native method should not have path to exception handling");
1917 } else {
1918 // We are switching to the old paradigm: search for the exception handler in caller_frame
1919 // instead of in the exception handler of caller_frame.sender()
1920
1921 if (JvmtiExport::can_post_on_exceptions()) {
1922 // "Full-speed catching" is not necessary here,
1923 // since we're notifying the VM on every catch.
1924 // Force deoptimization and the rest of the lookup
1925 // will be fine.
1926 deoptimize_caller_frame(current);
1927 }
1928
1929 // Check the stack guard pages. If enabled, look for handler in this frame;
1930 // otherwise, forcibly unwind the frame.
1931 //
1932 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
1933 bool force_unwind = !current->stack_overflow_state()->reguard_stack();
1934 bool deopting = false;
1935 if (nm->is_deopt_pc(pc)) {
1936 deopting = true;
1937 RegisterMap map(current,
1938 RegisterMap::UpdateMap::skip,
1939 RegisterMap::ProcessFrames::include,
1940 RegisterMap::WalkContinuation::skip);
1941 frame deoptee = current->last_frame().sender(&map);
1942 assert(deoptee.is_deoptimized_frame(), "must be deopted");
1943 // Adjust the pc back to the original throwing pc
1944 pc = deoptee.pc();
1945 }
1946
1947 // If we are forcing an unwind because of stack overflow then deopt is
1948 // irrelevant since we are throwing the frame away anyway.
1949
1950 if (deopting && !force_unwind) {
1951 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
1952 } else {
1953
1954 handler_address =
1955 force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);
1956
1957 if (handler_address == nullptr) {
1958 bool recursive_exception = false;
1959 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1960 assert (handler_address != nullptr, "must have compiled handler");
1961 // Update the exception cache only when the unwind was not forced
1962 // and no other exception occurred during the computation of the
1963 // compiled exception handler. Checking for exception oop equality is not
1964 // sufficient because some exceptions are pre-allocated and reused.
1965 if (!force_unwind && !recursive_exception) {
1966 nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
1967 }
1968 } else {
1969 #ifdef ASSERT
1970 bool recursive_exception = false;
1971 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
1972 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
1973 p2i(handler_address), p2i(computed_address));
1974 #endif
1975 }
1976 }
1977
1978 current->set_exception_pc(pc);
1979 current->set_exception_handler_pc(handler_address);
1980 }
1981
1982 // Restore the exception oop that was cleared above.
1983 current->set_exception_oop(exception());
1984 return handler_address;
1985
1986 JRT_END
1987
1988 // We are entering here from exception_blob.
1989 // If there is a compiled exception handler in this method, we will continue there;
1990 // otherwise we will unwind the stack and continue at the caller of the top frame's method.
1991 // Note we enter without the usual JRT wrapper. We will call a helper routine that
1992 // will do the normal VM entry. We do it this way so that we can see if the nmethod
1993 // we looked up the handler for has been deoptimized in the meantime. If it has been,
1994 // we must not use the handler and instead return the deopt blob.
1995 address OptoRuntime::handle_exception_C(JavaThread* current) {
1996 //
1997 // We are in Java, not in the VM, and in debug mode we have a NoHandleMark
1998 //
1999 #ifndef PRODUCT
2000 SharedRuntime::_find_handler_ctr++; // find exception handler
2001 #endif
2002 DEBUG_ONLY(NoHandleMark __hm;)
2003 nmethod* nm = nullptr;
2004 address handler_address = nullptr;
2005 {
2006 // Enter the VM
2007
2008 ResetNoHandleMark rnhm;
2009 handler_address = handle_exception_C_helper(current, nm);
2010 }
2011
2012 // Back in java: Use no oops, DON'T safepoint
2013
2014 // Now check to see if the handler we are returning is in a now
2015 // deoptimized frame
2016
2017 if (nm != nullptr) {
2018 RegisterMap map(current,
2019 RegisterMap::UpdateMap::skip,
2020 RegisterMap::ProcessFrames::skip,
2021 RegisterMap::WalkContinuation::skip);
2022 frame caller = current->last_frame().sender(&map);
2023 #ifdef ASSERT
2024 assert(caller.is_compiled_frame(), "must be");
2025 #endif // ASSERT
2026 if (caller.is_deoptimized_frame()) {
2027 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
2028 }
2029 }
2030 return handler_address;
2031 }
2032
2033 //------------------------------rethrow----------------------------------------
2034 // We get here after compiled code has executed a 'RethrowNode'. The callee
2035 // is either throwing or rethrowing an exception. The callee-save registers
2036 // have been restored, synchronized objects have been unlocked and the callee
2037 // stack frame has been removed. The return address was passed in.
2038 // Exception oop is passed as the 1st argument. This routine is then called
2039 // from the stub. On exit, we know where to jump in the caller's code.
2040 // After this C code exits, the stub will pop its frame and end in a jump
2041 // (instead of a return). We enter the caller's default handler.
2042 //
2043 // This must be JRT_LEAF:
2044 // - caller will not change its state as we cannot block on exit,
2045 // therefore raw_exception_handler_for_return_address is all it takes
2046 // to handle deoptimized blobs
2047 //
2048 // However, there needs to be a safepoint check in the middle! So compiled
2049 // safepoints are completely watertight.
2050 //
2051 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
2052 //
2053 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
2054 //
2055 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
2056 // ret_pc will have been loaded from the stack, so on AArch64 it will be signed.
2057 AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));
2058
2059 #ifndef PRODUCT
2060 SharedRuntime::_rethrow_ctr++; // count rethrows
2061 #endif
2062 assert (exception != nullptr, "should have thrown a NullPointerException");
2063 #ifdef ASSERT
2064 if (!(exception->is_a(vmClasses::Throwable_klass()))) {
2065 // should throw an exception here
2066 ShouldNotReachHere();
2067 }
2068 #endif
2069
2070 thread->set_vm_result_oop(exception);
2071 // Frame not compiled (handles deoptimization blob)
2072 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
2073 }
2074
2075 static const TypeFunc* make_rethrow_Type() {
2076 // create input type (domain)
2077 const Type **fields = TypeTuple::fields(1);
2078 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2079 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2080
2081 // create result type (range)
2082 fields = TypeTuple::fields(1);
2083 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
2084 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
2085
2086 return TypeFunc::make(domain, range);
2087 }
2088
2089
2090 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
2091 // Deoptimize the caller before continuing, as the compiled
2092 // exception handler table may not be valid.
2093 if (DeoptimizeOnAllocationException && doit) {
2094 deoptimize_caller_frame(thread);
2095 }
2096 }
2097
2098 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
2099 // Called from within the owner thread, so no need for safepoint
2100 RegisterMap reg_map(thread,
2101 RegisterMap::UpdateMap::include,
2102 RegisterMap::ProcessFrames::include,
2103 RegisterMap::WalkContinuation::skip);
2104 frame stub_frame = thread->last_frame();
2105 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2106 frame caller_frame = stub_frame.sender(&reg_map);
2107
2108 // Deoptimize the caller frame.
2109 Deoptimization::deoptimize_frame(thread, caller_frame.id());
2110 }
2111
2112
2113 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
2114 // Called from within the owner thread, so no need for safepoint
2115 RegisterMap reg_map(thread,
2116 RegisterMap::UpdateMap::include,
2117 RegisterMap::ProcessFrames::include,
2118 RegisterMap::WalkContinuation::skip);
2119 frame stub_frame = thread->last_frame();
2120 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
2121 frame caller_frame = stub_frame.sender(&reg_map);
2122 return caller_frame.is_deoptimized_frame();
2123 }
2124
2125 static const TypeFunc* make_register_finalizer_Type() {
2126 // create input type (domain)
2127 const Type **fields = TypeTuple::fields(1);
2128 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
2129 // // The JavaThread* is passed to each routine as the last argument
2130 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
2131 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2132
2133 // create result type (range)
2134 fields = TypeTuple::fields(0);
2135
2136 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2137
2138 return TypeFunc::make(domain,range);
2139 }
2140
2141 const TypeFunc *OptoRuntime::class_init_barrier_Type() {
2142 // create input type (domain)
2143 const Type** fields = TypeTuple::fields(1);
2144 fields[TypeFunc::Parms+0] = TypeKlassPtr::NOTNULL;
2145 // // The JavaThread* is passed to each routine as the last argument
2146 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
2147 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);
2148
2149 // create result type (range)
2150 fields = TypeTuple::fields(0);
2151 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
2152 return TypeFunc::make(domain,range);
2153 }
2154
2155 #if INCLUDE_JFR
2156 static const TypeFunc* make_class_id_load_barrier_Type() {
2157 // create input type (domain)
2158 const Type **fields = TypeTuple::fields(1);
2159 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
2160 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);
2161
2162 // create result type (range)
2163 fields = TypeTuple::fields(0);
2164
2165 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);
2166
2167 return TypeFunc::make(domain,range);
2168 }
2169 #endif // INCLUDE_JFR
2170
2171 //-----------------------------------------------------------------------------
2172 // runtime upcall support
2173 const TypeFunc *OptoRuntime::runtime_up_call_Type() {
2174 // create input type (domain)
2175 const Type **fields = TypeTuple::fields(1);
2176 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2177 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields);
2178
2179 // create result type (range)
2180 fields = TypeTuple::fields(0);
2181
2182 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2183
2184 return TypeFunc::make(domain,range);
2185 }
2186
2187 //-----------------------------------------------------------------------------
2188 static const TypeFunc* make_dtrace_method_entry_exit_Type() {
2189 // create input type (domain)
2190 const Type **fields = TypeTuple::fields(2);
2191 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2192 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
2193 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2194
2195 // create result type (range)
2196 fields = TypeTuple::fields(0);
2197
2198 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2199
2200 return TypeFunc::make(domain,range);
2201 }
2202
2203 static const TypeFunc* make_dtrace_object_alloc_Type() {
2204 // create input type (domain)
2205 const Type **fields = TypeTuple::fields(2);
2206 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
2207 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object
2208
2209 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
2210
2211 // create result type (range)
2212 fields = TypeTuple::fields(0);
2213
2214 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
2215
2216 return TypeFunc::make(domain,range);
2217 }
2218
2219 JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, register_finalizer_C, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
2220 assert(oopDesc::is_oop(obj), "must be a valid oop");
2221 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
2222 InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
2223 JRT_END
2224
2225 JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, class_init_barrier_C, OptoRuntime::class_init_barrier_C(Klass* k, JavaThread* current))
2226 InstanceKlass* ik = InstanceKlass::cast(k);
2227 if (ik->should_be_initialized()) {
2228 ik->initialize(CHECK);
2229 } else if (UsePerfData) {
2230 _perf_OptoRuntime_class_init_barrier_redundant_count->inc();
2231 }
2232 JRT_END
2233
2234 //-----------------------------------------------------------------------------
2235
2236 NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
2237
2238 //
2239 // dump the collected NamedCounters.
2240 //
2241 void OptoRuntime::print_named_counters() {
2242 int total_lock_count = 0;
2243 int eliminated_lock_count = 0;
2244
2245 NamedCounter* c = _named_counters;
2246 while (c) {
2247 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
2248 int count = c->count();
2249 if (count > 0) {
2250 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
2251 if (Verbose) {
2252 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
2253 }
2254 total_lock_count += count;
2255 if (eliminated) {
2256 eliminated_lock_count += count;
2257 }
2258 }
2259 }
2260 c = c->next();
2261 }
2262 if (total_lock_count > 0) {
2263 tty->print_cr("dynamic locks: %d", total_lock_count);
2264 if (eliminated_lock_count) {
2265 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
2266 (int)(eliminated_lock_count * 100.0 / total_lock_count));
2267 }
2268 }
2269 }
2270
2271 //
2272 // Allocate a new NamedCounter. The JVMState is used to generate the
2273 // name, which consists of method@bci entries for the inlining tree.
2274 //
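// For example, each inlining scope contributes "<holder>.<method>@<bci>",
// innermost scope first, separated by spaces.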
2275
2276 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
2277 int max_depth = youngest_jvms->depth();
2278
2279 // Visit scopes from youngest to oldest.
2280 bool first = true;
2281 stringStream st;
2282 for (int depth = max_depth; depth >= 1; depth--) {
2283 JVMState* jvms = youngest_jvms->of_depth(depth);
2284 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
2285 if (!first) {
2286 st.print(" ");
2287 } else {
2288 first = false;
2289 }
2290 int bci = jvms->bci();
2291 if (bci < 0) bci = 0;
2292 if (m != nullptr) {
2293 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
2294 } else {
2295 st.print("no method");
2296 }
2297 st.print("@%d", bci);
2298 // To print line numbers instead of bci use: m->line_number_from_bci(bci)
2299 }
2300 NamedCounter* c = new NamedCounter(st.freeze(), tag);
2301
2302 // atomically add the new counter to the head of the list. We only
2303 // add counters so this is safe.
2304 NamedCounter* head;
2305 do {
2306 c->set_next(nullptr);
2307 head = _named_counters;
2308 c->set_next(head);
2309 } while (AtomicAccess::cmpxchg(&_named_counters, head, c) != head);
2310 return c;
2311 }
2312
2313 void OptoRuntime::initialize_types() {
2314 _new_instance_Type = make_new_instance_Type();
2315 _new_array_Type = make_new_array_Type();
2316 _multianewarray2_Type = multianewarray_Type(2);
2317 _multianewarray3_Type = multianewarray_Type(3);
2318 _multianewarray4_Type = multianewarray_Type(4);
2319 _multianewarray5_Type = multianewarray_Type(5);
2320 _multianewarrayN_Type = make_multianewarrayN_Type();
2321 _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
2322 _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
2323 _monitor_notify_Type = make_monitor_notify_Type();
2324 _uncommon_trap_Type = make_uncommon_trap_Type();
2325 _athrow_Type = make_athrow_Type();
2326 _rethrow_Type = make_rethrow_Type();
2327 _Math_D_D_Type = make_Math_D_D_Type();
2328 _Math_DD_D_Type = make_Math_DD_D_Type();
2329 _modf_Type = make_modf_Type();
2330 _l2f_Type = make_l2f_Type();
2331 _void_long_Type = make_void_long_Type();
2332 _void_void_Type = make_void_void_Type();
2333 _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
2334 _flush_windows_Type = make_flush_windows_Type();
2335 _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
2336 _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
2337 _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
2338 _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
2339 _unsafe_setmemory_Type = make_setmemory_Type();
2340 _array_fill_Type = make_array_fill_Type();
2341 _array_sort_Type = make_array_sort_Type();
2342 _array_partition_Type = make_array_partition_Type();
2343 _aescrypt_block_Type = make_aescrypt_block_Type();
2344 _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
2345 _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
2346 _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
2347 _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
2348 _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true);
2349 _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);
2350 _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
2351 _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
2352 _double_keccak_Type = make_double_keccak_Type();
2353 _multiplyToLen_Type = make_multiplyToLen_Type();
2354 _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
2355 _montgomerySquare_Type = make_montgomerySquare_Type();
2356 _squareToLen_Type = make_squareToLen_Type();
2357 _mulAdd_Type = make_mulAdd_Type();
2358 _bigIntegerShift_Type = make_bigIntegerShift_Type();
2359 _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
2360 _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
2361 _chacha20Block_Type = make_chacha20Block_Type();
2362 _kyberNtt_Type = make_kyberNtt_Type();
2363 _kyberInverseNtt_Type = make_kyberInverseNtt_Type();
2364 _kyberNttMult_Type = make_kyberNttMult_Type();
2365 _kyberAddPoly_2_Type = make_kyberAddPoly_2_Type();
2366 _kyberAddPoly_3_Type = make_kyberAddPoly_3_Type();
2367 _kyber12To16_Type = make_kyber12To16_Type();
2368 _kyberBarrettReduce_Type = make_kyberBarrettReduce_Type();
2369 _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type();
2370 _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type();
2371 _dilithiumNttMult_Type = make_dilithiumNttMult_Type();
2372 _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type();
2373 _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type();
2374 _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
2375 _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
2376 _string_IndexOf_Type = make_string_IndexOf_Type();
2377 _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
2378 _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
2379 _intpoly_assign_Type = make_intpoly_assign_Type();
2380 _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
2381 _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
2382 _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
2383 _osr_end_Type = make_osr_end_Type();
2384 _register_finalizer_Type = make_register_finalizer_Type();
2385 _vthread_transition_Type = make_vthread_transition_Type();
2386 JFR_ONLY(
2387 _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
2388 )
2389 _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
2390 _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
2391 }
2392
2393 int trace_exception_counter = 0;
2394 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
2395 trace_exception_counter++;
2396 stringStream tempst;
2397
2398 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
2399 exception_oop->print_value_on(&tempst);
2400 tempst.print(" in ");
2401 CodeBlob* blob = CodeCache::find_blob(exception_pc);
2402 if (blob->is_nmethod()) {
2403 blob->as_nmethod()->method()->print_value_on(&tempst);
2404 } else if (blob->is_runtime_stub()) {
2405 tempst.print("<runtime-stub>");
2406 } else {
2407 tempst.print("<unknown>");
2408 }
2409 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
2410 tempst.print("]");
2411
2412 st->print_raw_cr(tempst.freeze());
2413 }
2414
2415 #define DO_COUNTERS2(macro2, macro1) \
2416 macro2(OptoRuntime, new_instance_C) \
2417 macro2(OptoRuntime, new_array_C) \
2418 macro2(OptoRuntime, new_array_nozero_C) \
2419 macro2(OptoRuntime, multianewarray2_C) \
2420 macro2(OptoRuntime, multianewarray3_C) \
2421 macro2(OptoRuntime, multianewarray4_C) \
2422 macro2(OptoRuntime, multianewarrayN_C) \
2423 macro2(OptoRuntime, monitor_notify_C) \
2424 macro2(OptoRuntime, monitor_notifyAll_C) \
2425 macro2(OptoRuntime, handle_exception_C_helper) \
2426 macro2(OptoRuntime, register_finalizer_C) \
2427 macro2(OptoRuntime, class_init_barrier_C) \
2428 macro1(OptoRuntime, class_init_barrier_redundant)
2429
2430 #define INIT_COUNTER_TIME_AND_CNT(sub, name) \
2431 NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
2432 NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
2433
2434 #define INIT_COUNTER_CNT(sub, name) \
2435 NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
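// For example, DO_COUNTERS2 applied with these macros expands
// macro2(OptoRuntime, new_instance_C) into the creation of
// _perf_OptoRuntime_new_instance_C_timer and
// _perf_OptoRuntime_new_instance_C_count.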
2436
2437 void OptoRuntime::init_counters() {
2438 assert(CompilerConfig::is_c2_enabled(), "");
2439
2440 if (UsePerfData) {
2441 EXCEPTION_MARK;
2442
2443 DO_COUNTERS2(INIT_COUNTER_TIME_AND_CNT, INIT_COUNTER_CNT)
2444
2445 if (HAS_PENDING_EXCEPTION) {
2446 vm_exit_during_initialization("jvm_perf_init failed unexpectedly");
2447 }
2448 }
2449 }
2450 #undef INIT_COUNTER_TIME_AND_CNT
2451 #undef INIT_COUNTER_CNT
2452
2453 #define PRINT_COUNTER_TIME_AND_CNT(sub, name) { \
2454 jlong count = _perf_##sub##_##name##_count->get_value(); \
2455 if (count > 0) { \
2456 st->print_cr(" %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
2457 _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
2458 _perf_##sub##_##name##_timer->thread_counter_value_us(), \
2459 count); \
2460 }}
2461
2462 #define PRINT_COUNTER_CNT(sub, name) { \
2463 jlong count = _perf_##sub##_##name##_count->get_value(); \
2464 if (count > 0) { \
2465 st->print_cr(" %-30s = " JLONG_FORMAT_W(5) " events", #name, count); \
2466 }}
2467
2468 void OptoRuntime::print_counters_on(outputStream* st) {
2469 if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c2_enabled()) {
2470 DO_COUNTERS2(PRINT_COUNTER_TIME_AND_CNT, PRINT_COUNTER_CNT)
2471 } else {
2472 st->print_cr(" OptoRuntime: no info (%s is disabled)",
2473 (!CompilerConfig::is_c2_enabled() ? "C2" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
2474 }
2475 }
2476
2477 #undef PRINT_COUNTER_TIME_AND_CNT
2478 #undef PRINT_COUNTER_CNT
2479 #undef DO_COUNTERS2