/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/output.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
// To force FullGCALot inside a runtime function, add the following two lines
//
//   Universe::release_fullgc_alot_dummy();
//   Universe::heap()->collect();
//
// At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000


#define C2_BLOB_FIELD_DEFINE(name, type) \
  type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
#define C2_JVMTI_STUB_FIELD_DEFINE(name) \
  address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
#undef C2_BLOB_FIELD_DEFINE
#undef C2_STUB_FIELD_DEFINE
#undef C2_JVMTI_STUB_FIELD_DEFINE

#define C2_BLOB_NAME_DEFINE(name, type)  "C2 Runtime " # name "_blob",
#define C2_STUB_NAME_DEFINE(name, f, t, r)  "C2 Runtime " # name,
#define C2_JVMTI_STUB_NAME_DEFINE(name)  "C2 Runtime " # name,
const char* OptoRuntime::_stub_names[] = {
  C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
};
#undef C2_BLOB_NAME_DEFINE
#undef C2_STUB_NAME_DEFINE
#undef C2_JVMTI_STUB_NAME_DEFINE

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled like code");
  return true;
}
#endif // ASSERT

/*
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
  if (var == nullptr) { return false; }
*/

#define GEN_C2_BLOB(name, type)                               \
  BLOB_FIELD_NAME(name) =                                     \
    generate_ ## name ## _blob();                             \
  if (BLOB_FIELD_NAME(name) == nullptr) { return false; }

// a few helper macros to conjure up generate_stub call arguments
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_TYPEFUNC(name) name ## _Type
#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id)

// Almost all the C functions targeted from the generated stubs are
// implemented locally to OptoRuntime with names that can be generated
// from the stub name by appending suffix '_C'. However, in two cases
// a common target method also needs to be called from shared runtime
// stubs. In these two cases the opto stubs rely on method
// implementations defined in class SharedRuntime. The following
// defines temporarily rebind the generated names to reference the
// relevant implementations.
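
// For illustration only (not a stub in the C2_STUBS_DO list): for a
// hypothetical stub named `foo`, the helper macros above expand to
//
//   C2_STUB_FIELD_NAME(foo) -> _foo_Java
//   C2_STUB_TYPEFUNC(foo)   -> foo_Type
//   C2_STUB_C_FUNC(foo)     -> CAST_FROM_FN_PTR(address, foo_C)
//   C2_STUB_NAME(foo)       -> stub_name(OptoStubId::foo_id)
//
// GEN_C2_STUB below combines these into a single generate_stub() call and
// stores the resulting entry point in the _foo_Java field.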

#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc )  \
  C2_STUB_FIELD_NAME(name) =                                  \
    generate_stub(env,                                        \
                  C2_STUB_TYPEFUNC(name),                     \
                  C2_STUB_C_FUNC(name),                       \
                  C2_STUB_NAME(name),                         \
                  fancy_jump,                                 \
                  pass_tls,                                   \
                  pass_retpc);                                \
  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; }  \

#define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)

#define GEN_C2_JVMTI_STUB(name)                               \
  STUB_FIELD_NAME(name) =                                     \
    generate_stub(env,                                        \
                  notify_jvmti_vthread_Type,                  \
                  C2_JVMTI_STUB_C_FUNC(name),                 \
                  C2_STUB_NAME(name),                         \
                  0,                                          \
                  true,                                       \
                  false);                                     \
  if (STUB_FIELD_NAME(name) == nullptr) { return false; }     \

bool OptoRuntime::generate(ciEnv* env) {

  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)

  return true;
}

#undef GEN_C2_BLOB

#undef C2_STUB_FIELD_NAME
#undef C2_STUB_TYPEFUNC
#undef C2_STUB_C_FUNC
#undef C2_STUB_NAME
#undef GEN_C2_STUB

#undef C2_JVMTI_STUB_C_FUNC
#undef GEN_C2_JVMTI_STUB
// #undef gen

const TypeFunc* OptoRuntime::_new_instance_Type = nullptr;
const TypeFunc* OptoRuntime::_new_array_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr;
const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr;
const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr;
const TypeFunc* OptoRuntime::_athrow_Type = nullptr;
const TypeFunc* OptoRuntime::_rethrow_Type = nullptr;
const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr;
const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr;
const TypeFunc* OptoRuntime::_modf_Type = nullptr;
const TypeFunc* OptoRuntime::_l2f_Type = nullptr;
const TypeFunc* OptoRuntime::_void_long_Type = nullptr;
const TypeFunc* OptoRuntime::_void_void_Type = nullptr;
const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr;
const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr;
const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr;
const TypeFunc* OptoRuntime::_array_fill_Type = nullptr;
const TypeFunc* OptoRuntime::_array_sort_Type = nullptr;
const TypeFunc* OptoRuntime::_array_partition_Type = nullptr;
const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr;
const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_double_keccak_Type = nullptr;
const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr;
const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr;
const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr;
const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr;
const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr;
const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr;
const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr;
const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr;
const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr;
const TypeFunc* OptoRuntime::_kyberNtt_Type = nullptr;
const TypeFunc* OptoRuntime::_kyberInverseNtt_Type = nullptr;
const TypeFunc* OptoRuntime::_kyberNttMult_Type = nullptr;
const TypeFunc* OptoRuntime::_kyberAddPoly_2_Type = nullptr;
const TypeFunc* OptoRuntime::_kyberAddPoly_3_Type = nullptr;
const TypeFunc* OptoRuntime::_kyber12To16_Type = nullptr;
const TypeFunc* OptoRuntime::_kyberBarrettReduce_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumNttMult_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type = nullptr;
const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr;
const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr;
const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr;
const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr;
const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr;
const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr;
const TypeFunc* OptoRuntime::_osr_end_Type = nullptr;
const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr;
#if INCLUDE_JFR
const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr;
#endif // INCLUDE_JFR
#if INCLUDE_JVMTI
const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type = nullptr;
#endif // INCLUDE_JVMTI
const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr;
const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr;

// Helper method to do generation of RunTimeStub's
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen, address C_function,
                                   const char *name, int is_fancy_jump,
                                   bool pass_tls,
                                   bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
  CompilationMemoryStatisticMark cmsm(directive);
  ResourceMark rm;
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs = (RuntimeStub*)cb;
  assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}

// local methods passed as arguments to stub generator that forward
// control to corresponding JRT methods of SharedRuntime

void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
                                   oopDesc* dest, jint dest_pos,
                                   jint length, JavaThread* thread) {
  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
}

void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    current->set_vm_result_oop(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END


// array allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    // Although the oopFactory likes to work with the elem_type,
    // the compiler prefers the array_type, since it must already have
    // that latter value in hand for the fast path.
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
    result = oopFactory::new_objArray(elem_type, len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  oop result = current->vm_result_oop();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.

// multianewarray for 2 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END

// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END

JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result_oop(obj);
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))

  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

static const TypeFunc* make_new_instance_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JVMTI
static const TypeFunc* make_notify_jvmti_vthread_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
  fields[TypeFunc::Parms+1] = TypeInt::BOOL;        // jboolean
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
#endif

static const TypeFunc* make_athrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_new_array_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;         // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type **fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
  for( int i = 1; i < nargs; i++ )
    fields[TypeFunc::Parms + i] = TypeInt::INT;     // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_multianewarrayN_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_uncommon_trap_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling

static const TypeFunc* make_complete_monitor_enter_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------

static const TypeFunc* make_complete_monitor_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(3);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock - BasicLock
  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM;   // Thread pointer (Self)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_monitor_notify_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_flush_windows_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_l2f_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_modf_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  fields[TypeFunc::Parms+1] = Type::FLOAT;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_Math_D_D_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  // Symbol* name of class to be loaded
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(num_arg);
  // Symbol* name of class to be loaded
  assert(num_arg > 0, "must have at least 1 input");
  for (uint i = 0; i < num_arg; i++) {
    fields[TypeFunc::Parms+i] = in_type;
  }
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);

  // create result type (range)
  const uint num_ret = 1;
  fields = TypeTuple::fields(num_ret);
  fields[TypeFunc::Parms+0] = out_type;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_Math_DD_D_Type() {
  const Type **fields = TypeTuple::fields(4);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  fields[TypeFunc::Parms+2] = Type::DOUBLE;
  fields[TypeFunc::Parms+3] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

//-------------- currentTimeMillis, currentTimeNanos, etc

static const TypeFunc* make_void_long_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_void_void_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_jfr_write_checkpoint_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}


// Takes as parameters:
//   void *dest
//   long size
//   uchar byte

static const TypeFunc* make_setmemory_Type() {
  // create input type (domain)
  int argcnt = NOT_LP64(3) LP64_ONLY(4);
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;        // dest
  fields[argp++] = TypeX_X;                 // size
  LP64_ONLY(fields[argp++] = Type::HALF);   // size
  fields[argp++] = TypeInt::UBYTE;          // bytevalue
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// arraycopy stub variations:
enum ArrayCopyType {
  ac_fast,                      // void(ptr, ptr, size_t)
  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
  ac_slow,                      // void(ptr, int, ptr, int, int)
  ac_generic                    //  int(ptr, int, ptr, int, int)
};

static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args      = (act == ac_fast ? 3 : 5);
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL;    // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // dest_pos
    fields[argp++] = TypeInt::INT;      // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL;  // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = nullptr; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  const TypeTuple *domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_partition_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // array
  fields[argp++] = TypeInt::INT;      // element type
  fields[argp++] = TypeInt::INT;      // low
  fields[argp++] = TypeInt::INT;      // end
  fields[argp++] = TypePtr::NOTNULL;  // pivot_indices (int array)
  fields[argp++] = TypeInt::INT;      // indexPivot1
  fields[argp++] = TypeInt::INT;      // indexPivot2
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_sort_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // array
  fields[argp++] = TypeInt::INT;      // element type
  fields[argp++] = TypeInt::INT;      // fromIndex
  fields[argp++] = TypeInt::INT;      // toIndex
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_aescrypt_block_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesCRC32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypeInt::INT;      // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesCRC32C_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // buf
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypePtr::NOTNULL;  // table
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // src + offset
  fields[argp++] = TypeInt::INT;      // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypePtr::NOTNULL;  // r array
  fields[argp++] = TypeInt::INT;      // src len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypeInt::INT;      // src len
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_counterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypePtr::NOTNULL;  // counter array
  fields[argp++] = TypeInt::INT;      // src len
  fields[argp++] = TypePtr::NOTNULL;  // saved_encCounter
  fields[argp++] = TypePtr::NOTNULL;  // saved used addr
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 8;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // byte[] in + inOfs
  fields[argp++] = TypeInt::INT;      // int len
  fields[argp++] = TypePtr::NOTNULL;  // byte[] ct + ctOfs
  fields[argp++] = TypePtr::NOTNULL;  // byte[] out + outOfs
  fields[argp++] = TypePtr::NOTNULL;  // byte[] key from AESCrypt obj
  fields[argp++] = TypePtr::NOTNULL;  // long[] state from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;  // long[] subkeyHtbl from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;  // byte[] counter from GCTR obj

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 3 : 2;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;              // buf
  fields[argp++] = TypePtr::NOTNULL;              // state
  if (is_sha3) fields[argp++] = TypeInt::INT;     // block_size
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/*
 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
 */
static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 5 : 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;              // buf
  fields[argp++] = TypePtr::NOTNULL;              // state
  if (is_sha3) fields[argp++] = TypeInt::INT;     // block_size
  fields[argp++] = TypeInt::INT;                  // ofs
  fields[argp++] = TypeInt::INT;                  // limit
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning ofs (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// SHAKE128Parallel doubleKeccak function
static const TypeFunc* make_double_keccak_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // status0
  fields[argp++] = TypePtr::NOTNULL;  // status1

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_multiplyToLen_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // x
  fields[argp++] = TypeInt::INT;      // xlen
  fields[argp++] = TypePtr::NOTNULL;  // y
  fields[argp++] = TypeInt::INT;      // ylen
  fields[argp++] = TypePtr::NOTNULL;  // z
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // x
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypePtr::NOTNULL;  // z
  fields[argp++] = TypeInt::INT;      // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_mulAdd_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // out
  fields[argp++] = TypePtr::NOTNULL;  // in
  fields[argp++] = TypeInt::INT;      // offset
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypeInt::INT;      // k
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning carry (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // a
  fields[argp++] = TypePtr::NOTNULL;  // b
  fields[argp++] = TypePtr::NOTNULL;  // n
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypeLong::LONG;    // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;  // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // a
  fields[argp++] = TypePtr::NOTNULL;  // n
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypeLong::LONG;    // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;  // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_bigIntegerShift_Type() {
  int argcnt = 5;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // newArr
  fields[argp++] = TypePtr::NOTNULL;  // oldArr
  fields[argp++] = TypeInt::INT;      // newIdx
  fields[argp++] = TypeInt::INT;      // shiftCount
  fields[argp++] = TypeInt::INT;      // numIter
assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1350 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1351 1352 // no result type needed 1353 fields = TypeTuple::fields(1); 1354 fields[TypeFunc::Parms + 0] = nullptr; 1355 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1356 return TypeFunc::make(domain, range); 1357 } 1358 1359 static const TypeFunc* make_vectorizedMismatch_Type() { 1360 // create input type (domain) 1361 int num_args = 4; 1362 int argcnt = num_args; 1363 const Type** fields = TypeTuple::fields(argcnt); 1364 int argp = TypeFunc::Parms; 1365 fields[argp++] = TypePtr::NOTNULL; // obja 1366 fields[argp++] = TypePtr::NOTNULL; // objb 1367 fields[argp++] = TypeInt::INT; // length, number of elements 1368 fields[argp++] = TypeInt::INT; // log2scale, element size 1369 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1370 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1371 1372 //return mismatch index (int) 1373 fields = TypeTuple::fields(1); 1374 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1375 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1376 return TypeFunc::make(domain, range); 1377 } 1378 1379 static const TypeFunc* make_ghash_processBlocks_Type() { 1380 int argcnt = 4; 1381 1382 const Type** fields = TypeTuple::fields(argcnt); 1383 int argp = TypeFunc::Parms; 1384 fields[argp++] = TypePtr::NOTNULL; // state 1385 fields[argp++] = TypePtr::NOTNULL; // subkeyH 1386 fields[argp++] = TypePtr::NOTNULL; // data 1387 fields[argp++] = TypeInt::INT; // blocks 1388 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1389 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1390 1391 // result type needed 1392 fields = TypeTuple::fields(1); 1393 fields[TypeFunc::Parms+0] = nullptr; // void 1394 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1395 return TypeFunc::make(domain, range); 1396 } 1397 1398 static const TypeFunc* make_chacha20Block_Type() { 1399 int argcnt = 2; 1400 1401 const Type** fields = TypeTuple::fields(argcnt); 1402 int argp = TypeFunc::Parms; 1403 fields[argp++] = TypePtr::NOTNULL; // state 1404 fields[argp++] = TypePtr::NOTNULL; // result 1405 1406 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1407 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1408 1409 // result type needed 1410 fields = TypeTuple::fields(1); 1411 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int 1412 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1413 return TypeFunc::make(domain, range); 1414 } 1415 1416 // Kyber NTT function 1417 static const TypeFunc* make_kyberNtt_Type() { 1418 int argcnt = 2; 1419 1420 const Type** fields = TypeTuple::fields(argcnt); 1421 int argp = TypeFunc::Parms; 1422 fields[argp++] = TypePtr::NOTNULL; // coeffs 1423 fields[argp++] = TypePtr::NOTNULL; // NTT zetas 1424 1425 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1426 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1427 1428 // result type needed 1429 fields = TypeTuple::fields(1); 1430 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1431 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1432 return TypeFunc::make(domain, range); 1433 } 1434 1435 // Kyber inverse NTT function 1436 static const TypeFunc* make_kyberInverseNtt_Type() { 1437 int argcnt = 2; 1438 1439 const Type** fields = 
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // coeffs
  fields[argp++] = TypePtr::NOTNULL;    // inverse NTT zetas

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Kyber NTT multiply function
static const TypeFunc* make_kyberNttMult_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // result
  fields[argp++] = TypePtr::NOTNULL;    // ntta
  fields[argp++] = TypePtr::NOTNULL;    // nttb
  fields[argp++] = TypePtr::NOTNULL;    // NTT multiply zetas

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Kyber add 2 polynomials function
static const TypeFunc* make_kyberAddPoly_2_Type() {
  int argcnt = 3;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // result
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // b

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Kyber add 3 polynomials function
static const TypeFunc* make_kyberAddPoly_3_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // result
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // b
  fields[argp++] = TypePtr::NOTNULL;    // c

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Kyber XOF output parsing into polynomial coefficient candidates,
// or decompress(12,...) function
static const TypeFunc* make_kyber12To16_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // condensed
  fields[argp++] = TypeInt::INT;        // condensedOffs
  fields[argp++] = TypePtr::NOTNULL;    // parsed
  fields[argp++] = TypeInt::INT;        // parsedLength

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Kyber Barrett reduce function
static const TypeFunc* make_kyberBarrettReduce_Type() {
  int argcnt = 1;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // coeffs
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Dilithium NTT function except for the final "normalization" to |coeff| < Q
static const TypeFunc* make_dilithiumAlmostNtt_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // coeffs
  fields[argp++] = TypePtr::NOTNULL;    // NTT zetas

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Dilithium inverse NTT function except the final mod Q division by 2^256
static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // coeffs
  fields[argp++] = TypePtr::NOTNULL;    // inverse NTT zetas

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Dilithium NTT multiply function
static const TypeFunc* make_dilithiumNttMult_Type() {
  int argcnt = 3;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // result
  fields[argp++] = TypePtr::NOTNULL;    // ntta
  fields[argp++] = TypePtr::NOTNULL;    // nttb

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Dilithium Montgomery multiply a polynomial coefficient array by a constant
static const TypeFunc* make_dilithiumMontMulByConstant_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // coeffs
  fields[argp++] = TypeInt::INT;        // constant multiplier

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Dilithium decompose polynomial
static const TypeFunc* make_dilithiumDecomposePoly_Type() {
  int argcnt = 5;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // input
  fields[argp++] = TypePtr::NOTNULL;    // lowPart
  fields[argp++] = TypePtr::NOTNULL;    // highPart
  fields[argp++] = TypeInt::INT;        // 2 * gamma2
  fields[argp++] = TypeInt::INT;        // multiplier

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_base64_encodeBlock_Type() {
  int argcnt = 6;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dp
  fields[argp++] = TypeInt::BOOL;       // isURL
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_string_IndexOf_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // haystack array
  fields[argp++] = TypeInt::INT;        // haystack length
  fields[argp++] = TypePtr::NOTNULL;    // needle array
  fields[argp++] = TypeInt::INT;        // needle length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;  // Index of needle in haystack
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_base64_decodeBlock_Type() {
  int argcnt = 7;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // src offset
  fields[argp++] = TypeInt::INT;        // src length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dest offset
  fields[argp++] = TypeInt::BOOL;       // isURL
  fields[argp++] = TypeInt::BOOL;       // isMIME
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;  // count of bytes written to dst
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_poly1305_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // input array
  fields[argp++] = TypeInt::INT;        // input length
  fields[argp++] = TypePtr::NOTNULL;    // accumulator array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
  int argcnt = 3;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a array
  fields[argp++] = TypePtr::NOTNULL;    // b array
  fields[argp++] = TypePtr::NOTNULL;    // r(esult) array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_intpoly_assign_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // set flag
  fields[argp++] = TypePtr::NOTNULL;    // a array (result)
  fields[argp++] = TypePtr::NOTNULL;    // b array (if set is set)
  fields[argp++] = TypeInt::INT;        // array length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
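
// Note on the make_*_Type() helpers above: each one builds a domain tuple
// listing the stub's arguments (starting at TypeFunc::Parms) and a range tuple
// describing the return value. A value-returning stub uses
// TypeTuple::make(TypeFunc::Parms + 1, fields) with a concrete result type,
// while a void stub uses TypeTuple::make(TypeFunc::Parms, fields) with
// fields[TypeFunc::Parms + 0] = nullptr. A minimal illustrative sketch
// (make_example_Type and its parameters are hypothetical, not an existing stub):
//
//   static const TypeFunc* make_example_Type() {
//     const Type** fields = TypeTuple::fields(2);
//     int argp = TypeFunc::Parms;
//     fields[argp++] = TypePtr::NOTNULL;   // array argument
//     fields[argp++] = TypeInt::INT;       // length argument
//     const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 2, fields);
//
//     fields = TypeTuple::fields(1);
//     fields[TypeFunc::Parms + 0] = TypeInt::INT;   // int result
//     const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
//     return TypeFunc::make(domain, range);
//   }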

//------------- Interpreter state for on stack replacement
static const TypeFunc* make_osr_end_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // OSR temp buf
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type
  fields = TypeTuple::fields(1);
  // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
  fields[TypeFunc::Parms+0] = nullptr;  // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//-------------------------------------------------------------------------------------
// register policy

bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
  assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
  switch (register_save_policy[reg]) {
    case 'C': return false;  // SOC
    case 'E': return true;   // SOE
    case 'N': return false;  // NS
    case 'A': return false;  // AS
  }
  ShouldNotReachHere();
  return false;
}

//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// The method is an entry that is always called by a C++ method not
// directly from compiled code. Compiled code will call the C++ method following.
// We can't allow an async exception to be installed during exception processing.
JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling. DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs check this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to the old paradigm: search for the exception handler in
    // caller_frame instead of in the exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages. If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert(handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and no other exception occurred during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception, pc, handler_address);
        }
      } else {
#ifdef ASSERT
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore correct return pc. Was saved above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note we enter without the usual JRT wrapper. We will call a helper routine that
// will do the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been
// we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* current) {
  //
  // We are in Java not VM and in debug mode we have a NoHandleMark
  //
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  DEBUG_ONLY(NoHandleMark __hm;)
  nmethod* nm = nullptr;
  address handler_address = nullptr;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(current, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != nullptr) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::skip,
                    RegisterMap::WalkContinuation::skip);
    frame caller = current->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}

//------------------------------rethrow----------------------------------------
// We get here after compiled code has executed a 'RethrowNode'. The callee
// is either throwing or rethrowing an exception. The callee-save registers
// have been restored, synchronized objects have been unlocked and the callee
// stack frame has been removed. The return address was passed in.
// Exception oop is passed as the 1st argument. This routine is then called
// from the stub. On exit, we know where to jump in the caller's code.
// After this C code exits, the stub will pop his frame and end in a jump
// (instead of a return). We enter the caller's default handler.
//
// This must be JRT_LEAF:
//     - caller will not change its state as we cannot block on exit,
//       therefore raw_exception_handler_for_return_address is all it takes
//       to handle deoptimized blobs
//
// However, there needs to be a safepoint check in the middle! So compiled
// safepoints are completely watertight.
//
// Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
//
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
//
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
  // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
  AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));

#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++;               // count rethrows
#endif
  assert(exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  thread->set_vm_result_oop(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}

static const TypeFunc* make_rethrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Exception oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Exception oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}


void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
  // Deoptimize the caller before continuing, as the compiled
  // exception handler table may not be valid.
  if (!StressCompiledExceptionHandlers && doit) {
    deoptimize_caller_frame(thread);
  }
}

void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
}


bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  return caller_frame.is_deoptimized_frame();
}

static const TypeFunc* make_register_finalizer_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // oop; Receiver
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JFR
static const TypeFunc* make_class_id_load_barrier_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);

  return TypeFunc::make(domain, range);
}
#endif // INCLUDE_JFR

//-----------------------------------------------------------------------------
static const TypeFunc* make_dtrace_method_entry_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;       // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM;  // Method*; Method we are entering
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_dtrace_object_alloc_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;   // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

//-----------------------------------------------------------------------------
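
// Lock profiling support (illustrative summary): each NamedCounter below is
// keyed by the inlining chain of a lock site and tagged as a regular or
// eliminated lock counter; print_named_counters() reports the totals.
// Counter names encode the inlining tree as "Holder.method@bci" entries,
// youngest scope first, e.g. a hypothetical "java.lang.String.indexOf@17".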

NamedCounter * volatile OptoRuntime::_named_counters = nullptr;

//
// dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}

//
// Allocate a new NamedCounter. The JVMState is used to generate the
// name which consists of method@line for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    if (bci < 0) bci = 0;
    if (m != nullptr) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c = new NamedCounter(st.freeze(), tag);

  // atomically add the new counter to the head of the list. We only
  // add counters so this is safe.
  NamedCounter* head;
  do {
    c->set_next(nullptr);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}

void OptoRuntime::initialize_types() {
  _new_instance_Type = make_new_instance_Type();
  _new_array_Type = make_new_array_Type();
  _multianewarray2_Type = multianewarray_Type(2);
  _multianewarray3_Type = multianewarray_Type(3);
  _multianewarray4_Type = multianewarray_Type(4);
  _multianewarray5_Type = multianewarray_Type(5);
  _multianewarrayN_Type = make_multianewarrayN_Type();
  _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
  _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
  _monitor_notify_Type = make_monitor_notify_Type();
  _uncommon_trap_Type = make_uncommon_trap_Type();
  _athrow_Type = make_athrow_Type();
  _rethrow_Type = make_rethrow_Type();
  _Math_D_D_Type = make_Math_D_D_Type();
  _Math_DD_D_Type = make_Math_DD_D_Type();
  _modf_Type = make_modf_Type();
  _l2f_Type = make_l2f_Type();
  _void_long_Type = make_void_long_Type();
  _void_void_Type = make_void_void_Type();
  _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
  _flush_windows_Type = make_flush_windows_Type();
  _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
  _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
  _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
  _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
  _unsafe_setmemory_Type = make_setmemory_Type();
  _array_fill_Type = make_array_fill_Type();
  _array_sort_Type = make_array_sort_Type();
  _array_partition_Type = make_array_partition_Type();
  _aescrypt_block_Type = make_aescrypt_block_Type();
  _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
  _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
  _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
  _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
  _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type(/* is_sha3= */ true);
  _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type(/* is_sha3= */ false);
  _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
  _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
  _double_keccak_Type = make_double_keccak_Type();
  _multiplyToLen_Type = make_multiplyToLen_Type();
  _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
  _montgomerySquare_Type = make_montgomerySquare_Type();
  _squareToLen_Type = make_squareToLen_Type();
  _mulAdd_Type = make_mulAdd_Type();
  _bigIntegerShift_Type = make_bigIntegerShift_Type();
  _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
  _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
  _chacha20Block_Type = make_chacha20Block_Type();
  _kyberNtt_Type = make_kyberNtt_Type();
  _kyberInverseNtt_Type = make_kyberInverseNtt_Type();
  _kyberNttMult_Type = make_kyberNttMult_Type();
  _kyberAddPoly_2_Type = make_kyberAddPoly_2_Type();
  _kyberAddPoly_3_Type = make_kyberAddPoly_3_Type();
  _kyber12To16_Type = make_kyber12To16_Type();
  _kyberBarrettReduce_Type = make_kyberBarrettReduce_Type();
  _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type();
  _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type();
  _dilithiumNttMult_Type = make_dilithiumNttMult_Type();
  _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type();
  _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type();
  _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
  _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
  _string_IndexOf_Type = make_string_IndexOf_Type();
  _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
  _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
  _intpoly_assign_Type = make_intpoly_assign_Type();
  _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
  _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
  _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
  _osr_end_Type = make_osr_end_Type();
  _register_finalizer_Type = make_register_finalizer_Type();
  JFR_ONLY(
    _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
  )
#if INCLUDE_JVMTI
  _notify_jvmti_vthread_Type = make_notify_jvmti_vthread_Type();
#endif // INCLUDE_JVMTI
  _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
  _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
}

int trace_exception_counter = 0;
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_nmethod()) {
    blob->as_nmethod()->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.freeze());
}
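
// Note: the trace produced by trace_exception() above is only emitted when the
// 'exceptions' Unified Logging tag is enabled at info level (e.g. via
// -Xlog:exceptions=info), as gated by the LogTarget(Info, exceptions) check in
// handle_exception_C_helper().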