/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/output.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
//  To force FullGCALot inside a runtime function, add the following two lines
//
//  Universe::release_fullgc_alot_dummy();
//  Universe::heap()->collect();
//
//  At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000


#define C2_BLOB_FIELD_DEFINE(name, type) \
  type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
#define C2_JVMTI_STUB_FIELD_DEFINE(name) \
  address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
#undef C2_BLOB_FIELD_DEFINE
#undef C2_STUB_FIELD_DEFINE
#undef C2_JVMTI_STUB_FIELD_DEFINE

#define C2_BLOB_NAME_DEFINE(name, type) "C2 Runtime " # name "_blob",
#define C2_STUB_NAME_DEFINE(name, f, t, r) "C2 Runtime " # name,
#define C2_JVMTI_STUB_NAME_DEFINE(name) "C2 Runtime " # name,
const char* OptoRuntime::_stub_names[] = {
  C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
};
#undef C2_BLOB_NAME_DEFINE
#undef C2_STUB_NAME_DEFINE
#undef C2_JVMTI_STUB_NAME_DEFINE

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled-like code");
  return true;
}
#endif // ASSERT

/*
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
  if (var == nullptr) { return false; }
*/

#define GEN_C2_BLOB(name, type) \
  generate_ ## name ## _blob();

// a few helper macros to conjure up generate_stub call arguments
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_TYPEFUNC(name) name ## _Type
#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id)

// Almost all the C functions targeted from the generated stubs are
// implemented locally to OptoRuntime with names that can be generated
// from the stub name by appending suffix '_C'. However, in two cases
// a common target method also needs to be called from shared runtime
// stubs. In these two cases the opto stubs rely on method
// implementations defined in class SharedRuntime. The following
// defines temporarily rebind the generated names to reference the
// relevant implementations.
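
// For orientation: with the helper macros above, a hypothetical stub entry
// named "rethrow" in C2_STUBS_DO (the name is illustrative only; the actual
// entries are supplied by the C2_STUBS_DO definition) would make GEN_C2_STUB
// below expand to roughly:
//
//   _rethrow_Java =
//     generate_stub(env,
//                   rethrow_Type,
//                   CAST_FROM_FN_PTR(address, rethrow_C),
//                   stub_name(OptoStubId::rethrow_id),
//                   fancy_jump, pass_tls, pass_retpc);
//   if (_rethrow_Java == nullptr) { return false; }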

#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc) \
  C2_STUB_FIELD_NAME(name) = \
    generate_stub(env, \
                  C2_STUB_TYPEFUNC(name), \
                  C2_STUB_C_FUNC(name), \
                  C2_STUB_NAME(name), \
                  fancy_jump, \
                  pass_tls, \
                  pass_retpc); \
  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \

#define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)

#define GEN_C2_JVMTI_STUB(name) \
  STUB_FIELD_NAME(name) = \
    generate_stub(env, \
                  notify_jvmti_vthread_Type, \
                  C2_JVMTI_STUB_C_FUNC(name), \
                  C2_STUB_NAME(name), \
                  0, \
                  true, \
                  false); \
  if (STUB_FIELD_NAME(name) == nullptr) { return false; } \

bool OptoRuntime::generate(ciEnv* env) {

  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)

  return true;
}

#undef GEN_C2_BLOB

#undef C2_STUB_FIELD_NAME
#undef C2_STUB_TYPEFUNC
#undef C2_STUB_C_FUNC
#undef C2_STUB_NAME
#undef GEN_C2_STUB

#undef C2_JVMTI_STUB_C_FUNC
#undef GEN_C2_JVMTI_STUB
// #undef gen

const TypeFunc* OptoRuntime::_new_instance_Type = nullptr;
const TypeFunc* OptoRuntime::_new_array_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr;
const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr;
const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr;
const TypeFunc* OptoRuntime::_athrow_Type = nullptr;
const TypeFunc* OptoRuntime::_rethrow_Type = nullptr;
const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr;
const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr;
const TypeFunc* OptoRuntime::_modf_Type = nullptr;
const TypeFunc* OptoRuntime::_l2f_Type = nullptr;
const TypeFunc* OptoRuntime::_void_long_Type = nullptr;
const TypeFunc* OptoRuntime::_void_void_Type = nullptr;
const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr;
const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr;
const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr;
const TypeFunc* OptoRuntime::_array_fill_Type = nullptr;
const TypeFunc* OptoRuntime::_array_sort_Type = nullptr;
const TypeFunc* OptoRuntime::_array_partition_Type = nullptr;
const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr;
const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_double_keccak_Type = nullptr;
const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr;
const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr;
const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr;
const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr;
const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr;
const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr;
const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr;
const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr;
const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr;

const TypeFunc* OptoRuntime::_dilithiumAlmostNtt_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumAlmostInverseNtt_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumNttMult_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumMontMulByConstant_Type = nullptr;
const TypeFunc* OptoRuntime::_dilithiumDecomposePoly_Type = nullptr;

const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr;
const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr;
const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr;
const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr;
const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr;
const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr;
const TypeFunc* OptoRuntime::_osr_end_Type = nullptr;
const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr;
#if INCLUDE_JFR
const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr;
#endif // INCLUDE_JFR
#if INCLUDE_JVMTI
const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type = nullptr;
#endif // INCLUDE_JVMTI
const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr;
const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr;

// Helper method to generate RuntimeStubs
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen, address C_function,
                                   const char *name, int is_fancy_jump,
                                   bool pass_tls,
                                   bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
  CompilationMemoryStatisticMark cmsm(directive);
  ResourceMark rm;
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs = (RuntimeStub*)cb;
  assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}

// local methods passed as arguments to stub generator that forward
// control to corresponding JRT methods of SharedRuntime

void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
                                   oopDesc* dest, jint dest_pos,
                                   jint length, JavaThread* thread) {
  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
}

void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, bool is_larval, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    instanceOop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    if (is_larval) {
      // Check if this is a larval buffer allocation
      result->set_mark(result->mark().enter_larval_state());
    }
    current->set_vm_result(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END


// array allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_flatArray_klass()) {
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    FlatArrayKlass* fak = FlatArrayKlass::cast(array_type);
    Klass* elem_type = fak->element_klass();
    result = oopFactory::new_flatArray(elem_type, len, fak->layout_kind(), THREAD);
  } else if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    result = ObjArrayKlass::cast(array_type)->allocate(len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  oop result = current->vm_result();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.

// multianewarray for 2 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  jint* c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))

  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

static const TypeFunc* make_new_instance_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  fields[TypeFunc::Parms+1] = TypeInt::BOOL;        // is_larval
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JVMTI
static const TypeFunc* make_notify_jvmti_vthread_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop
  fields[TypeFunc::Parms+1] = TypeInt::BOOL;        // jboolean
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
#endif

static const TypeFunc* make_athrow_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // exception oop to be thrown
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_new_array_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;         // array size
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type** fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
  for (int i = 1; i < nargs; i++) {
    fields[TypeFunc::Parms + i] = TypeInt::INT;     // array size
  }
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_multianewarrayN_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_uncommon_trap_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling

static const TypeFunc* make_complete_monitor_enter_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------

static const TypeFunc* make_complete_monitor_exit_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(3);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock - BasicLock
  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM;   // Thread pointer (Self)
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_monitor_notify_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_flush_windows_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_l2f_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_modf_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  fields[TypeFunc::Parms+1] = Type::FLOAT;
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_Math_D_D_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(2);
  // Symbol* name of class to be loaded
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(num_arg);
  // Symbol* name of class to be loaded
  assert(num_arg > 0, "must have at least 1 input");
  for (uint i = 0; i < num_arg; i++) {
    fields[TypeFunc::Parms+i] = in_type;
  }
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);

  // create result type (range)
  const uint num_ret = 1;
  fields = TypeTuple::fields(num_ret);
  fields[TypeFunc::Parms+0] = out_type;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_Math_DD_D_Type() {
  const Type** fields = TypeTuple::fields(4);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  fields[TypeFunc::Parms+2] = Type::DOUBLE;
  fields[TypeFunc::Parms+3] = Type::HALF;
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+4, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

//-------------- currentTimeMillis, currentTimeNanos, etc

static const TypeFunc* make_void_long_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(0);
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_void_void_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(0);
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_jfr_write_checkpoint_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(0);
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}


// Takes as parameters:
//    void *dest
//    long size
//    uchar byte

static const TypeFunc* make_setmemory_Type() {
  // create input type (domain)
  int argcnt = NOT_LP64(3) LP64_ONLY(4);
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;        // dest
  fields[argp++] = TypeX_X;                 // size
  LP64_ONLY(fields[argp++] = Type::HALF);   // size
  fields[argp++] = TypeInt::UBYTE;          // bytevalue
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// arraycopy stub variations:
enum ArrayCopyType {
  ac_fast,                      // void(ptr, ptr, size_t)
  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
  ac_slow,                      // void(ptr, int, ptr, int, int)
  ac_generic                    //  int(ptr, int, ptr, int, int)
};

static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args      = (act == ac_fast ? 3 : 5);
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL;    // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // dest_pos
    fields[argp++] = TypeInt::INT;      // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL;  // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = nullptr; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  const TypeTuple* domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_partition_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // array
  fields[argp++] = TypeInt::INT;      // element type
  fields[argp++] = TypeInt::INT;      // low
  fields[argp++] = TypeInt::INT;      // end
  fields[argp++] = TypePtr::NOTNULL;  // pivot_indices (int array)
  fields[argp++] = TypeInt::INT;      // indexPivot1
  fields[argp++] = TypeInt::INT;      // indexPivot2
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_sort_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // array
  fields[argp++] = TypeInt::INT;      // element type
  fields[argp++] = TypeInt::INT;      // fromIndex
  fields[argp++] = TypeInt::INT;      // toIndex
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_aescrypt_block_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesCRC32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypeInt::INT;      // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesCRC32C_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // buf
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypePtr::NOTNULL;  // table
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // src + offset
  fields[argp++] = TypeInt::INT;      // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypePtr::NOTNULL;  // r array
  fields[argp++] = TypeInt::INT;      // src len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypeInt::INT;      // src len
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_counterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypePtr::NOTNULL;  // counter array
  fields[argp++] = TypeInt::INT;      // src len
  fields[argp++] = TypePtr::NOTNULL;  // saved_encCounter
  fields[argp++] = TypePtr::NOTNULL;  // saved used addr
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 8;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // byte[] in + inOfs
  fields[argp++] = TypeInt::INT;      // int len
  fields[argp++] = TypePtr::NOTNULL;  // byte[] ct + ctOfs
  fields[argp++] = TypePtr::NOTNULL;  // byte[] out + outOfs
  fields[argp++] = TypePtr::NOTNULL;  // byte[] key from AESCrypt obj
  fields[argp++] = TypePtr::NOTNULL;  // long[] state from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;  // long[] subkeyHtbl from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;  // byte[] counter from GCTR obj

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 3 : 2;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;              // buf
  fields[argp++] = TypePtr::NOTNULL;              // state
  if (is_sha3) fields[argp++] = TypeInt::INT;     // block_size
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/*
 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
 */
static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 5 : 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;              // buf
  fields[argp++] = TypePtr::NOTNULL;              // state
  if (is_sha3) fields[argp++] = TypeInt::INT;     // block_size
  fields[argp++] = TypeInt::INT;                  // ofs
  fields[argp++] = TypeInt::INT;                  // limit
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning ofs (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// SHAKE128Parallel doubleKeccak function
static const TypeFunc* make_double_keccak_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // status0
  fields[argp++] = TypePtr::NOTNULL;  // status1

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_multiplyToLen_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // x
  fields[argp++] = TypeInt::INT;      // xlen
  fields[argp++] = TypePtr::NOTNULL;  // y
  fields[argp++] = TypeInt::INT;      // ylen
  fields[argp++] = TypePtr::NOTNULL;  // z
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // x
  fields[argp++] = TypeInt::INT;      // len
fields[argp++] = TypePtr::NOTNULL; // z 1263 fields[argp++] = TypeInt::INT; // zlen 1264 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1265 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1266 1267 // no result type needed 1268 fields = TypeTuple::fields(1); 1269 fields[TypeFunc::Parms+0] = nullptr; 1270 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1271 return TypeFunc::make(domain, range); 1272 } 1273 1274 static const TypeFunc* make_mulAdd_Type() { 1275 // create input type (domain) 1276 int num_args = 5; 1277 int argcnt = num_args; 1278 const Type** fields = TypeTuple::fields(argcnt); 1279 int argp = TypeFunc::Parms; 1280 fields[argp++] = TypePtr::NOTNULL; // out 1281 fields[argp++] = TypePtr::NOTNULL; // in 1282 fields[argp++] = TypeInt::INT; // offset 1283 fields[argp++] = TypeInt::INT; // len 1284 fields[argp++] = TypeInt::INT; // k 1285 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1286 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1287 1288 // returning carry (int) 1289 fields = TypeTuple::fields(1); 1290 fields[TypeFunc::Parms+0] = TypeInt::INT; 1291 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1292 return TypeFunc::make(domain, range); 1293 } 1294 1295 static const TypeFunc* make_montgomeryMultiply_Type() { 1296 // create input type (domain) 1297 int num_args = 7; 1298 int argcnt = num_args; 1299 const Type** fields = TypeTuple::fields(argcnt); 1300 int argp = TypeFunc::Parms; 1301 fields[argp++] = TypePtr::NOTNULL; // a 1302 fields[argp++] = TypePtr::NOTNULL; // b 1303 fields[argp++] = TypePtr::NOTNULL; // n 1304 fields[argp++] = TypeInt::INT; // len 1305 fields[argp++] = TypeLong::LONG; // inv 1306 fields[argp++] = Type::HALF; 1307 fields[argp++] = TypePtr::NOTNULL; // result 1308 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1309 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1310 1311 // result type needed 1312 fields = TypeTuple::fields(1); 1313 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1314 1315 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1316 return TypeFunc::make(domain, range); 1317 } 1318 1319 static const TypeFunc* make_montgomerySquare_Type() { 1320 // create input type (domain) 1321 int num_args = 6; 1322 int argcnt = num_args; 1323 const Type** fields = TypeTuple::fields(argcnt); 1324 int argp = TypeFunc::Parms; 1325 fields[argp++] = TypePtr::NOTNULL; // a 1326 fields[argp++] = TypePtr::NOTNULL; // n 1327 fields[argp++] = TypeInt::INT; // len 1328 fields[argp++] = TypeLong::LONG; // inv 1329 fields[argp++] = Type::HALF; 1330 fields[argp++] = TypePtr::NOTNULL; // result 1331 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1332 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1333 1334 // result type needed 1335 fields = TypeTuple::fields(1); 1336 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1337 1338 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1339 return TypeFunc::make(domain, range); 1340 } 1341 1342 static const TypeFunc* make_bigIntegerShift_Type() { 1343 int argcnt = 5; 1344 const Type** fields = TypeTuple::fields(argcnt); 1345 int argp = TypeFunc::Parms; 1346 fields[argp++] = TypePtr::NOTNULL; // newArr 1347 fields[argp++] = TypePtr::NOTNULL; // oldArr 1348 fields[argp++] = TypeInt::INT; // newIdx 1349 fields[argp++] = TypeInt::INT; // shiftCount 1350 fields[argp++] = TypeInt::INT; // numIter 1351 
assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1352 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1353 1354 // no result type needed 1355 fields = TypeTuple::fields(1); 1356 fields[TypeFunc::Parms + 0] = nullptr; 1357 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1358 return TypeFunc::make(domain, range); 1359 } 1360 1361 static const TypeFunc* make_vectorizedMismatch_Type() { 1362 // create input type (domain) 1363 int num_args = 4; 1364 int argcnt = num_args; 1365 const Type** fields = TypeTuple::fields(argcnt); 1366 int argp = TypeFunc::Parms; 1367 fields[argp++] = TypePtr::NOTNULL; // obja 1368 fields[argp++] = TypePtr::NOTNULL; // objb 1369 fields[argp++] = TypeInt::INT; // length, number of elements 1370 fields[argp++] = TypeInt::INT; // log2scale, element size 1371 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1372 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1373 1374 //return mismatch index (int) 1375 fields = TypeTuple::fields(1); 1376 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1377 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1378 return TypeFunc::make(domain, range); 1379 } 1380 1381 static const TypeFunc* make_ghash_processBlocks_Type() { 1382 int argcnt = 4; 1383 1384 const Type** fields = TypeTuple::fields(argcnt); 1385 int argp = TypeFunc::Parms; 1386 fields[argp++] = TypePtr::NOTNULL; // state 1387 fields[argp++] = TypePtr::NOTNULL; // subkeyH 1388 fields[argp++] = TypePtr::NOTNULL; // data 1389 fields[argp++] = TypeInt::INT; // blocks 1390 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1391 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1392 1393 // result type needed 1394 fields = TypeTuple::fields(1); 1395 fields[TypeFunc::Parms+0] = nullptr; // void 1396 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1397 return TypeFunc::make(domain, range); 1398 } 1399 1400 static const TypeFunc* make_chacha20Block_Type() { 1401 int argcnt = 2; 1402 1403 const Type** fields = TypeTuple::fields(argcnt); 1404 int argp = TypeFunc::Parms; 1405 fields[argp++] = TypePtr::NOTNULL; // state 1406 fields[argp++] = TypePtr::NOTNULL; // result 1407 1408 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1409 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1410 1411 // result type needed 1412 fields = TypeTuple::fields(1); 1413 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int 1414 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1415 return TypeFunc::make(domain, range); 1416 } 1417 1418 // Dilithium NTT function except for the final "normalization" to |coeff| < Q 1419 static const TypeFunc* make_dilithiumAlmostNtt_Type() { 1420 int argcnt = 2; 1421 1422 const Type** fields = TypeTuple::fields(argcnt); 1423 int argp = TypeFunc::Parms; 1424 fields[argp++] = TypePtr::NOTNULL; // coeffs 1425 fields[argp++] = TypePtr::NOTNULL; // NTT zetas 1426 1427 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1428 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1429 1430 // result type needed 1431 fields = TypeTuple::fields(1); 1432 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1433 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1434 return TypeFunc::make(domain, range); 1435 } 1436 1437 // Dilithium inverse NTT function except the final mod Q division by 2^256 1438 
static const TypeFunc* make_dilithiumAlmostInverseNtt_Type() { 1439 int argcnt = 2; 1440 1441 const Type** fields = TypeTuple::fields(argcnt); 1442 int argp = TypeFunc::Parms; 1443 fields[argp++] = TypePtr::NOTNULL; // coeffs 1444 fields[argp++] = TypePtr::NOTNULL; // inverse NTT zetas 1445 1446 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1447 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1448 1449 // result type needed 1450 fields = TypeTuple::fields(1); 1451 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1452 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1453 return TypeFunc::make(domain, range); 1454 } 1455 1456 // Dilithium NTT multiply function 1457 static const TypeFunc* make_dilithiumNttMult_Type() { 1458 int argcnt = 3; 1459 1460 const Type** fields = TypeTuple::fields(argcnt); 1461 int argp = TypeFunc::Parms; 1462 fields[argp++] = TypePtr::NOTNULL; // result 1463 fields[argp++] = TypePtr::NOTNULL; // ntta 1464 fields[argp++] = TypePtr::NOTNULL; // nttb 1465 1466 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1467 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1468 1469 // result type needed 1470 fields = TypeTuple::fields(1); 1471 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1472 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1473 return TypeFunc::make(domain, range); 1474 } 1475 1476 // Dilithium Montgomery multiplication of a polynomial coefficient array by a constant 1477 static const TypeFunc* make_dilithiumMontMulByConstant_Type() { 1478 int argcnt = 2; 1479 1480 const Type** fields = TypeTuple::fields(argcnt); 1481 int argp = TypeFunc::Parms; 1482 fields[argp++] = TypePtr::NOTNULL; // coeffs 1483 fields[argp++] = TypeInt::INT; // constant multiplier 1484 1485 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1486 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1487 1488 // result type needed 1489 fields = TypeTuple::fields(1); 1490 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1491 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1492 return TypeFunc::make(domain, range); 1493 } 1494 1495 // Dilithium decompose polynomial 1496 static const TypeFunc* make_dilithiumDecomposePoly_Type() { 1497 int argcnt = 5; 1498 1499 const Type** fields = TypeTuple::fields(argcnt); 1500 int argp = TypeFunc::Parms; 1501 fields[argp++] = TypePtr::NOTNULL; // input 1502 fields[argp++] = TypePtr::NOTNULL; // lowPart 1503 fields[argp++] = TypePtr::NOTNULL; // highPart 1504 fields[argp++] = TypeInt::INT; // 2 * gamma2 1505 fields[argp++] = TypeInt::INT; // multiplier 1506 1507 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1508 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1509 1510 // result type needed 1511 fields = TypeTuple::fields(1); 1512 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1513 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1514 return TypeFunc::make(domain, range); 1515 } 1516 1517 static const TypeFunc* make_base64_encodeBlock_Type() { 1518 int argcnt = 6; 1519 1520 const Type** fields = TypeTuple::fields(argcnt); 1521 int argp = TypeFunc::Parms; 1522 fields[argp++] = TypePtr::NOTNULL; // src array 1523 fields[argp++] = TypeInt::INT; // offset 1524 fields[argp++] = TypeInt::INT; // length 1525 fields[argp++] = TypePtr::NOTNULL; // dest array 1526 fields[argp++] = TypeInt::INT; // dp 1527 fields[argp++] = TypeInt::BOOL; // isURL
1528 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1529 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1530 1531 // result type needed 1532 fields = TypeTuple::fields(1); 1533 fields[TypeFunc::Parms + 0] = nullptr; // void 1534 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1535 return TypeFunc::make(domain, range); 1536 } 1537 1538 static const TypeFunc* make_string_IndexOf_Type() { 1539 int argcnt = 4; 1540 1541 const Type** fields = TypeTuple::fields(argcnt); 1542 int argp = TypeFunc::Parms; 1543 fields[argp++] = TypePtr::NOTNULL; // haystack array 1544 fields[argp++] = TypeInt::INT; // haystack length 1545 fields[argp++] = TypePtr::NOTNULL; // needle array 1546 fields[argp++] = TypeInt::INT; // needle length 1547 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1548 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1549 1550 // result type needed 1551 fields = TypeTuple::fields(1); 1552 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack 1553 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1554 return TypeFunc::make(domain, range); 1555 } 1556 1557 static const TypeFunc* make_base64_decodeBlock_Type() { 1558 int argcnt = 7; 1559 1560 const Type** fields = TypeTuple::fields(argcnt); 1561 int argp = TypeFunc::Parms; 1562 fields[argp++] = TypePtr::NOTNULL; // src array 1563 fields[argp++] = TypeInt::INT; // src offset 1564 fields[argp++] = TypeInt::INT; // src length 1565 fields[argp++] = TypePtr::NOTNULL; // dest array 1566 fields[argp++] = TypeInt::INT; // dest offset 1567 fields[argp++] = TypeInt::BOOL; // isURL 1568 fields[argp++] = TypeInt::BOOL; // isMIME 1569 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1570 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1571 1572 // result type needed 1573 fields = TypeTuple::fields(1); 1574 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst 1575 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1576 return TypeFunc::make(domain, range); 1577 } 1578 1579 static const TypeFunc* make_poly1305_processBlocks_Type() { 1580 int argcnt = 4; 1581 1582 const Type** fields = TypeTuple::fields(argcnt); 1583 int argp = TypeFunc::Parms; 1584 fields[argp++] = TypePtr::NOTNULL; // input array 1585 fields[argp++] = TypeInt::INT; // input length 1586 fields[argp++] = TypePtr::NOTNULL; // accumulator array 1587 fields[argp++] = TypePtr::NOTNULL; // r array 1588 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1589 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1590 1591 // result type needed 1592 fields = TypeTuple::fields(1); 1593 fields[TypeFunc::Parms + 0] = nullptr; // void 1594 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1595 return TypeFunc::make(domain, range); 1596 } 1597 1598 static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() { 1599 int argcnt = 3; 1600 1601 const Type** fields = TypeTuple::fields(argcnt); 1602 int argp = TypeFunc::Parms; 1603 fields[argp++] = TypePtr::NOTNULL; // a array 1604 fields[argp++] = TypePtr::NOTNULL; // b array 1605 fields[argp++] = TypePtr::NOTNULL; // r(esult) array 1606 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1607 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1608 1609 // result type needed 1610 fields = TypeTuple::fields(1); 1611 fields[TypeFunc::Parms + 0] = 
nullptr; // void 1612 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1613 return TypeFunc::make(domain, range); 1614 } 1615 1616 static const TypeFunc* make_intpoly_assign_Type() { 1617 int argcnt = 4; 1618 1619 const Type** fields = TypeTuple::fields(argcnt); 1620 int argp = TypeFunc::Parms; 1621 fields[argp++] = TypeInt::INT; // set flag 1622 fields[argp++] = TypePtr::NOTNULL; // a array (result) 1623 fields[argp++] = TypePtr::NOTNULL; // b array (used when the set flag is set) 1624 fields[argp++] = TypeInt::INT; // array length 1625 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1626 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1627 1628 // result type needed 1629 fields = TypeTuple::fields(1); 1630 fields[TypeFunc::Parms + 0] = nullptr; // void 1631 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1632 return TypeFunc::make(domain, range); 1633 } 1634 1635 //------------- Interpreter state for on stack replacement 1636 static const TypeFunc* make_osr_end_Type() { 1637 // create input type (domain) 1638 const Type **fields = TypeTuple::fields(1); 1639 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf 1640 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 1641 1642 // create result type 1643 fields = TypeTuple::fields(1); 1644 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop 1645 fields[TypeFunc::Parms+0] = nullptr; // void 1646 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 1647 return TypeFunc::make(domain, range); 1648 } 1649 1650 //------------------------------------------------------------------------------------- 1651 // register policy 1652 1653 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) { 1654 assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register"); 1655 switch (register_save_policy[reg]) { 1656 case 'C': return false; //SOC 1657 case 'E': return true ; //SOE 1658 case 'N': return false; //NS 1659 case 'A': return false; //AS 1660 } 1661 ShouldNotReachHere(); 1662 return false; 1663 } 1664 1665 //----------------------------------------------------------------------- 1666 // Exceptions 1667 // 1668 1669 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg); 1670 1671 // This method is an entry point that is always called by a C++ method, never 1672 // directly from compiled code; compiled code calls the C++ method that follows. 1673 // We cannot allow an async exception to be installed during exception processing. 1674 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm)) 1675 // The frame we rethrow the exception to might not have been processed by the GC yet. 1676 // The stack watermark barrier takes care of detecting that and ensuring the frame 1677 // has updated oops. 1678 StackWatermarkSet::after_unwind(current); 1679 1680 // Do not confuse exception_oop with pending_exception. The exception_oop 1681 // is only used to pass arguments into the method. Not for general 1682 // exception handling. DO NOT CHANGE IT to use pending_exception, since 1683 // the runtime stubs check this on exit.
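// The body below follows a save / clear / look-up / re-install pattern for the
// thread-local exception state: the oop and pc are captured first (the oop under a
// Handle, since the handler lookup can trigger class loading and GC), the fields are
// cleared while the lookup runs, and the oop is re-installed for the stub on exit.
// A rough sketch of that flow (illustrative only, not the exact code below):
//
//   Handle exception(current, current->exception_oop());  // capture oop safely
//   address pc = current->exception_pc();                  // capture throwing pc
//   current->clear_exception_oop_and_pc();                 // lookup may load classes / throw
//   // ... find handler_address for (nm, pc, exception) ...
//   current->set_exception_oop(exception());               // hand the oop back to the stub
//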
1684 assert(current->exception_oop() != nullptr, "exception oop is found"); 1685 address handler_address = nullptr; 1686 1687 Handle exception(current, current->exception_oop()); 1688 address pc = current->exception_pc(); 1689 1690 // Clear out the exception oop and pc since looking up an 1691 // exception handler can cause class loading, which might throw an 1692 // exception and those fields are expected to be clear during 1693 // normal bytecode execution. 1694 current->clear_exception_oop_and_pc(); 1695 1696 LogTarget(Info, exceptions) lt; 1697 if (lt.is_enabled()) { 1698 ResourceMark rm; 1699 LogStream ls(lt); 1700 trace_exception(&ls, exception(), pc, ""); 1701 } 1702 1703 // for AbortVMOnException flag 1704 Exceptions::debug_check_abort(exception); 1705 1706 #ifdef ASSERT 1707 if (!(exception->is_a(vmClasses::Throwable_klass()))) { 1708 // should throw an exception here 1709 ShouldNotReachHere(); 1710 } 1711 #endif 1712 1713 // new exception handling: this method is entered only from adapters 1714 // exceptions from compiled java methods are handled in compiled code 1715 // using rethrow node 1716 1717 nm = CodeCache::find_nmethod(pc); 1718 assert(nm != nullptr, "No NMethod found"); 1719 if (nm->is_native_method()) { 1720 fatal("Native method should not have path to exception handling"); 1721 } else { 1722 // we are switching to old paradigm: search for exception handler in caller_frame 1723 // instead in exception handler of caller_frame.sender() 1724 1725 if (JvmtiExport::can_post_on_exceptions()) { 1726 // "Full-speed catching" is not necessary here, 1727 // since we're notifying the VM on every catch. 1728 // Force deoptimization and the rest of the lookup 1729 // will be fine. 1730 deoptimize_caller_frame(current); 1731 } 1732 1733 // Check the stack guard pages. If enabled, look for handler in this frame; 1734 // otherwise, forcibly unwind the frame. 1735 // 1736 // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate. 1737 bool force_unwind = !current->stack_overflow_state()->reguard_stack(); 1738 bool deopting = false; 1739 if (nm->is_deopt_pc(pc)) { 1740 deopting = true; 1741 RegisterMap map(current, 1742 RegisterMap::UpdateMap::skip, 1743 RegisterMap::ProcessFrames::include, 1744 RegisterMap::WalkContinuation::skip); 1745 frame deoptee = current->last_frame().sender(&map); 1746 assert(deoptee.is_deoptimized_frame(), "must be deopted"); 1747 // Adjust the pc back to the original throwing pc 1748 pc = deoptee.pc(); 1749 } 1750 1751 // If we are forcing an unwind because of stack overflow then deopt is 1752 // irrelevant since we are throwing the frame away anyway. 1753 1754 if (deopting && !force_unwind) { 1755 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception(); 1756 } else { 1757 1758 handler_address = 1759 force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc); 1760 1761 if (handler_address == nullptr) { 1762 bool recursive_exception = false; 1763 handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception); 1764 assert (handler_address != nullptr, "must have compiled handler"); 1765 // Update the exception cache only when the unwind was not forced 1766 // and there didn't happen another exception during the computation of the 1767 // compiled exception handler. Checking for exception oop equality is not 1768 // sufficient because some exceptions are pre-allocated and reused. 
1769 if (!force_unwind && !recursive_exception) { 1770 nm->add_handler_for_exception_and_pc(exception, pc, handler_address); 1771 } 1772 } else { 1773 #ifdef ASSERT 1774 bool recursive_exception = false; 1775 address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception); 1776 vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT, 1777 p2i(handler_address), p2i(computed_address)); 1778 #endif 1779 } 1780 } 1781 1782 current->set_exception_pc(pc); 1783 current->set_exception_handler_pc(handler_address); 1784 1785 // Check if the exception PC is a MethodHandle call site. 1786 current->set_is_method_handle_return(nm->is_method_handle_return(pc)); 1787 } 1788 1789 // Re-install the exception oop that was saved above (under a Handle). 1790 current->set_exception_oop(exception()); 1791 return handler_address; 1792 1793 JRT_END 1794 1795 // We are entering here from exception_blob 1796 // If there is a compiled exception handler in this method, we will continue there; 1797 // otherwise we will unwind the stack and continue at the caller of the top frame's method 1798 // Note we enter without the usual JRT wrapper. We will call a helper routine that 1799 // will do the normal VM entry. We do it this way so that we can see if the nmethod 1800 // we looked up the handler for has been deoptimized in the meantime. If it has been 1801 // we must not use the handler and instead return the deopt blob. 1802 address OptoRuntime::handle_exception_C(JavaThread* current) { 1803 // 1804 // We are in Java not VM and in debug mode we have a NoHandleMark 1805 // 1806 #ifndef PRODUCT 1807 SharedRuntime::_find_handler_ctr++; // find exception handler 1808 #endif 1809 debug_only(NoHandleMark __hm;) 1810 nmethod* nm = nullptr; 1811 address handler_address = nullptr; 1812 { 1813 // Enter the VM 1814 1815 ResetNoHandleMark rnhm; 1816 handler_address = handle_exception_C_helper(current, nm); 1817 } 1818 1819 // Back in Java: use no oops, DON'T safepoint 1820 1821 // Now check to see if the handler we are returning is in a now 1822 // deoptimized frame 1823 1824 if (nm != nullptr) { 1825 RegisterMap map(current, 1826 RegisterMap::UpdateMap::skip, 1827 RegisterMap::ProcessFrames::skip, 1828 RegisterMap::WalkContinuation::skip); 1829 frame caller = current->last_frame().sender(&map); 1830 #ifdef ASSERT 1831 assert(caller.is_compiled_frame(), "must be"); 1832 #endif // ASSERT 1833 if (caller.is_deoptimized_frame()) { 1834 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception(); 1835 } 1836 } 1837 return handler_address; 1838 } 1839 1840 //------------------------------rethrow---------------------------------------- 1841 // We get here after compiled code has executed a 'RethrowNode'. The callee 1842 // is either throwing or rethrowing an exception. The callee-save registers 1843 // have been restored, synchronized objects have been unlocked and the callee 1844 // stack frame has been removed. The return address was passed in. 1845 // Exception oop is passed as the 1st argument. This routine is then called 1846 // from the stub. On exit, we know where to jump in the caller's code. 1847 // After this C code exits, the stub will pop its frame and end in a jump 1848 // (instead of a return). We enter the caller's default handler.
1849 // 1850 // This must be JRT_LEAF: 1851 // - caller will not change its state as we cannot block on exit, 1852 // therefore raw_exception_handler_for_return_address is all it takes 1853 // to handle deoptimized blobs 1854 // 1855 // However, there needs to be a safepoint check in the middle! So compiled 1856 // safepoints are completely watertight. 1857 // 1858 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier. 1859 // 1860 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE* 1861 // 1862 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) { 1863 // ret_pc will have been loaded from the stack, so on AArch64 it will be signed. 1864 AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc)); 1865 1866 #ifndef PRODUCT 1867 SharedRuntime::_rethrow_ctr++; // count rethrows 1868 #endif 1869 assert(exception != nullptr, "should have thrown a NullPointerException"); 1870 #ifdef ASSERT 1871 if (!(exception->is_a(vmClasses::Throwable_klass()))) { 1872 // should throw an exception here 1873 ShouldNotReachHere(); 1874 } 1875 #endif 1876 1877 thread->set_vm_result(exception); 1878 // Frame not compiled (handles deoptimization blob) 1879 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc); 1880 } 1881 1882 static const TypeFunc* make_rethrow_Type() { 1883 // create input type (domain) 1884 const Type **fields = TypeTuple::fields(1); 1885 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1886 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1887 1888 // create result type (range) 1889 fields = TypeTuple::fields(1); 1890 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1891 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 1892 1893 return TypeFunc::make(domain, range); 1894 } 1895 1896 1897 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) { 1898 // Deoptimize the caller before continuing, as the compiled 1899 // exception handler table may not be valid. 1900 if (!StressCompiledExceptionHandlers && doit) { 1901 deoptimize_caller_frame(thread); 1902 } 1903 } 1904 1905 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) { 1906 // Called from within the owner thread, so no need for safepoint 1907 RegisterMap reg_map(thread, 1908 RegisterMap::UpdateMap::include, 1909 RegisterMap::ProcessFrames::include, 1910 RegisterMap::WalkContinuation::skip); 1911 frame stub_frame = thread->last_frame(); 1912 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1913 frame caller_frame = stub_frame.sender(&reg_map); 1914 1915 // Deoptimize the caller frame.
1916 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 1917 } 1918 1919 1920 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) { 1921 // Called from within the owner thread, so no need for safepoint 1922 RegisterMap reg_map(thread, 1923 RegisterMap::UpdateMap::include, 1924 RegisterMap::ProcessFrames::include, 1925 RegisterMap::WalkContinuation::skip); 1926 frame stub_frame = thread->last_frame(); 1927 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1928 frame caller_frame = stub_frame.sender(&reg_map); 1929 return caller_frame.is_deoptimized_frame(); 1930 } 1931 1932 static const TypeFunc* make_register_finalizer_Type() { 1933 // create input type (domain) 1934 const Type **fields = TypeTuple::fields(1); 1935 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver 1936 // // The JavaThread* is passed to each routine as the last argument 1937 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread 1938 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1939 1940 // create result type (range) 1941 fields = TypeTuple::fields(0); 1942 1943 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1944 1945 return TypeFunc::make(domain, range); 1946 } 1947 1948 #if INCLUDE_JFR 1949 static const TypeFunc* make_class_id_load_barrier_Type() { 1950 // create input type (domain) 1951 const Type **fields = TypeTuple::fields(1); 1952 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS; 1953 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields); 1954 1955 // create result type (range) 1956 fields = TypeTuple::fields(0); 1957 1958 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields); 1959 1960 return TypeFunc::make(domain,range); 1961 } 1962 #endif // INCLUDE_JFR 1963 1964 //----------------------------------------------------------------------------- 1965 static const TypeFunc* make_dtrace_method_entry_exit_Type() { 1966 // create input type (domain) 1967 const Type **fields = TypeTuple::fields(2); 1968 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage 1969 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering 1970 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 1971 1972 // create result type (range) 1973 fields = TypeTuple::fields(0); 1974 1975 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1976 1977 return TypeFunc::make(domain, range); 1978 } 1979 1980 static const TypeFunc* make_dtrace_object_alloc_Type() { 1981 // create input type (domain) 1982 const Type **fields = TypeTuple::fields(2); 1983 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage 1984 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object 1985 1986 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 1987 1988 // create result type (range) 1989 fields = TypeTuple::fields(0); 1990 1991 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1992 1993 return TypeFunc::make(domain, range); 1994 } 1995 1996 JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current)) 1997 assert(oopDesc::is_oop(obj), "must be a valid oop"); 1998 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise"); 1999 InstanceKlass::register_finalizer(instanceOop(obj), CHECK); 2000 JRT_END 2001 2002
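// The make_*_Type() helpers above all share one shape: a domain tuple describing the
// stub's C argument list (excluding the implicit JavaThread*), followed by a range
// tuple describing the result (a single nullptr slot for void). A minimal sketch of
// that pattern for a hypothetical two-argument void stub (the name and parameters are
// illustrative only, not an actual stub in this file):
//
//   static const TypeFunc* make_example_void_stub_Type() {
//     const Type** fields = TypeTuple::fields(2);
//     fields[TypeFunc::Parms + 0] = TypePtr::NOTNULL;  // some array argument
//     fields[TypeFunc::Parms + 1] = TypeInt::INT;      // some length argument
//     const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 2, fields);
//
//     fields = TypeTuple::fields(1);
//     fields[TypeFunc::Parms + 0] = nullptr;           // void result
//     const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
//     return TypeFunc::make(domain, range);
//   }
//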
//----------------------------------------------------------------------------- 2003 2004 NamedCounter * volatile OptoRuntime::_named_counters = nullptr; 2005 2006 // 2007 // dump the collected NamedCounters. 2008 // 2009 void OptoRuntime::print_named_counters() { 2010 int total_lock_count = 0; 2011 int eliminated_lock_count = 0; 2012 2013 NamedCounter* c = _named_counters; 2014 while (c) { 2015 if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) { 2016 int count = c->count(); 2017 if (count > 0) { 2018 bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter; 2019 if (Verbose) { 2020 tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : ""); 2021 } 2022 total_lock_count += count; 2023 if (eliminated) { 2024 eliminated_lock_count += count; 2025 } 2026 } 2027 } 2028 c = c->next(); 2029 } 2030 if (total_lock_count > 0) { 2031 tty->print_cr("dynamic locks: %d", total_lock_count); 2032 if (eliminated_lock_count) { 2033 tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count, 2034 (int)(eliminated_lock_count * 100.0 / total_lock_count)); 2035 } 2036 } 2037 } 2038 2039 // 2040 // Allocate a new NamedCounter. The JVMState is used to generate the 2041 // name which consists of method@line for the inlining tree. 2042 // 2043 2044 NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) { 2045 int max_depth = youngest_jvms->depth(); 2046 2047 // Visit scopes from youngest to oldest. 2048 bool first = true; 2049 stringStream st; 2050 for (int depth = max_depth; depth >= 1; depth--) { 2051 JVMState* jvms = youngest_jvms->of_depth(depth); 2052 ciMethod* m = jvms->has_method() ? jvms->method() : nullptr; 2053 if (!first) { 2054 st.print(" "); 2055 } else { 2056 first = false; 2057 } 2058 int bci = jvms->bci(); 2059 if (bci < 0) bci = 0; 2060 if (m != nullptr) { 2061 st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8()); 2062 } else { 2063 st.print("no method"); 2064 } 2065 st.print("@%d", bci); 2066 // To print linenumbers instead of bci use: m->line_number_from_bci(bci) 2067 } 2068 NamedCounter* c = new NamedCounter(st.freeze(), tag); 2069 2070 // atomically add the new counter to the head of the list. We only 2071 // add counters so this is safe. 
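// Publication is a lock-free prepend: snapshot the current head, link the new counter
// in front of it, and retry the compare-and-swap if another thread updated the head in
// the meantime. Because counters are only ever added and never unlinked, there is no
// ABA or reclamation hazard to consider. The idiom, in the shape used below:
//
//   do {
//     head = _named_counters;                                      // snapshot head
//     c->set_next(head);                                           // link in front
//   } while (Atomic::cmpxchg(&_named_counters, head, c) != head);  // retry on race
//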
2072 NamedCounter* head; 2073 do { 2074 c->set_next(nullptr); 2075 head = _named_counters; 2076 c->set_next(head); 2077 } while (Atomic::cmpxchg(&_named_counters, head, c) != head); 2078 return c; 2079 } 2080 2081 void OptoRuntime::initialize_types() { 2082 _new_instance_Type = make_new_instance_Type(); 2083 _new_array_Type = make_new_array_Type(); 2084 _multianewarray2_Type = multianewarray_Type(2); 2085 _multianewarray3_Type = multianewarray_Type(3); 2086 _multianewarray4_Type = multianewarray_Type(4); 2087 _multianewarray5_Type = multianewarray_Type(5); 2088 _multianewarrayN_Type = make_multianewarrayN_Type(); 2089 _complete_monitor_enter_Type = make_complete_monitor_enter_Type(); 2090 _complete_monitor_exit_Type = make_complete_monitor_exit_Type(); 2091 _monitor_notify_Type = make_monitor_notify_Type(); 2092 _uncommon_trap_Type = make_uncommon_trap_Type(); 2093 _athrow_Type = make_athrow_Type(); 2094 _rethrow_Type = make_rethrow_Type(); 2095 _Math_D_D_Type = make_Math_D_D_Type(); 2096 _Math_DD_D_Type = make_Math_DD_D_Type(); 2097 _modf_Type = make_modf_Type(); 2098 _l2f_Type = make_l2f_Type(); 2099 _void_long_Type = make_void_long_Type(); 2100 _void_void_Type = make_void_void_Type(); 2101 _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type(); 2102 _flush_windows_Type = make_flush_windows_Type(); 2103 _fast_arraycopy_Type = make_arraycopy_Type(ac_fast); 2104 _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast); 2105 _generic_arraycopy_Type = make_arraycopy_Type(ac_generic); 2106 _slow_arraycopy_Type = make_arraycopy_Type(ac_slow); 2107 _unsafe_setmemory_Type = make_setmemory_Type(); 2108 _array_fill_Type = make_array_fill_Type(); 2109 _array_sort_Type = make_array_sort_Type(); 2110 _array_partition_Type = make_array_partition_Type(); 2111 _aescrypt_block_Type = make_aescrypt_block_Type(); 2112 _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type(); 2113 _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type(); 2114 _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type(); 2115 _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type(); 2116 _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true); 2117 _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);; 2118 _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true); 2119 _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false); 2120 _double_keccak_Type = make_double_keccak_Type(); 2121 _multiplyToLen_Type = make_multiplyToLen_Type(); 2122 _montgomeryMultiply_Type = make_montgomeryMultiply_Type(); 2123 _montgomerySquare_Type = make_montgomerySquare_Type(); 2124 _squareToLen_Type = make_squareToLen_Type(); 2125 _mulAdd_Type = make_mulAdd_Type(); 2126 _bigIntegerShift_Type = make_bigIntegerShift_Type(); 2127 _vectorizedMismatch_Type = make_vectorizedMismatch_Type(); 2128 _ghash_processBlocks_Type = make_ghash_processBlocks_Type(); 2129 _chacha20Block_Type = make_chacha20Block_Type(); 2130 2131 _dilithiumAlmostNtt_Type = make_dilithiumAlmostNtt_Type(); 2132 _dilithiumAlmostInverseNtt_Type = make_dilithiumAlmostInverseNtt_Type(); 2133 _dilithiumNttMult_Type = make_dilithiumNttMult_Type(); 2134 _dilithiumMontMulByConstant_Type = make_dilithiumMontMulByConstant_Type(); 2135 _dilithiumDecomposePoly_Type = make_dilithiumDecomposePoly_Type(); 2136 2137 _base64_encodeBlock_Type = 
make_base64_encodeBlock_Type(); 2138 _base64_decodeBlock_Type = make_base64_decodeBlock_Type(); 2139 _string_IndexOf_Type = make_string_IndexOf_Type(); 2140 _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type(); 2141 _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type(); 2142 _intpoly_assign_Type = make_intpoly_assign_Type(); 2143 _updateBytesCRC32_Type = make_updateBytesCRC32_Type(); 2144 _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type(); 2145 _updateBytesAdler32_Type = make_updateBytesAdler32_Type(); 2146 _osr_end_Type = make_osr_end_Type(); 2147 _register_finalizer_Type = make_register_finalizer_Type(); 2148 JFR_ONLY( 2149 _class_id_load_barrier_Type = make_class_id_load_barrier_Type(); 2150 ) 2151 #if INCLUDE_JVMTI 2152 _notify_jvmti_vthread_Type = make_notify_jvmti_vthread_Type(); 2153 #endif // INCLUDE_JVMTI 2154 _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type(); 2155 _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type(); 2156 } 2157 2158 int trace_exception_counter = 0; 2159 static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) { 2160 trace_exception_counter++; 2161 stringStream tempst; 2162 2163 tempst.print("%d [Exception (%s): ", trace_exception_counter, msg); 2164 exception_oop->print_value_on(&tempst); 2165 tempst.print(" in "); 2166 CodeBlob* blob = CodeCache::find_blob(exception_pc); 2167 if (blob->is_nmethod()) { 2168 blob->as_nmethod()->method()->print_value_on(&tempst); 2169 } else if (blob->is_runtime_stub()) { 2170 tempst.print("<runtime-stub>"); 2171 } else { 2172 tempst.print("<unknown>"); 2173 } 2174 tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc)); 2175 tempst.print("]"); 2176 2177 st->print_raw_cr(tempst.freeze()); 2178 } 2179 2180 const TypeFunc *OptoRuntime::store_inline_type_fields_Type() { 2181 // create input type (domain) 2182 uint total = SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2; 2183 const Type **fields = TypeTuple::fields(total); 2184 // We don't know the number of returned values and their 2185 // types. Assume all registers available to the return convention 2186 // are used. 2187 fields[TypeFunc::Parms] = TypePtr::BOTTOM; 2188 uint i = 1; 2189 for (; i < SharedRuntime::java_return_convention_max_int; i++) { 2190 fields[TypeFunc::Parms+i] = TypeInt::INT; 2191 } 2192 for (; i < total; i+=2) { 2193 fields[TypeFunc::Parms+i] = Type::DOUBLE; 2194 fields[TypeFunc::Parms+i+1] = Type::HALF; 2195 } 2196 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields); 2197 2198 // create result type (range) 2199 fields = TypeTuple::fields(1); 2200 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; 2201 2202 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields); 2203 2204 return TypeFunc::make(domain, range); 2205 } 2206 2207 const TypeFunc *OptoRuntime::pack_inline_type_Type() { 2208 // create input type (domain) 2209 uint total = 1 + SharedRuntime::java_return_convention_max_int + SharedRuntime::java_return_convention_max_float*2; 2210 const Type **fields = TypeTuple::fields(total); 2211 // We don't know the number of returned values and their 2212 // types. Assume all registers available to the return convention 2213 // are used. 
2214 fields[TypeFunc::Parms] = TypeRawPtr::BOTTOM; 2215 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; 2216 uint i = 2; 2217 for (; i < SharedRuntime::java_return_convention_max_int+1; i++) { 2218 fields[TypeFunc::Parms+i] = TypeInt::INT; 2219 } 2220 for (; i < total; i+=2) { 2221 fields[TypeFunc::Parms+i] = Type::DOUBLE; 2222 fields[TypeFunc::Parms+i+1] = Type::HALF; 2223 } 2224 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + total, fields); 2225 2226 // create result type (range) 2227 fields = TypeTuple::fields(1); 2228 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; 2229 2230 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1,fields); 2231 2232 return TypeFunc::make(domain, range); 2233 } 2234 2235 JRT_BLOCK_ENTRY(void, OptoRuntime::load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current)) 2236 JRT_BLOCK; 2237 oop buffer = array->read_value_from_flat_array(index, THREAD); 2238 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 2239 current->set_vm_result(buffer); 2240 JRT_BLOCK_END; 2241 JRT_END 2242 2243 const TypeFunc* OptoRuntime::load_unknown_inline_Type() { 2244 // create input type (domain) 2245 const Type** fields = TypeTuple::fields(2); 2246 fields[TypeFunc::Parms] = TypeOopPtr::NOTNULL; 2247 fields[TypeFunc::Parms+1] = TypeInt::POS; 2248 2249 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields); 2250 2251 // create result type (range) 2252 fields = TypeTuple::fields(1); 2253 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL; 2254 2255 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 2256 2257 return TypeFunc::make(domain, range); 2258 } 2259 2260 JRT_BLOCK_ENTRY(void, OptoRuntime::store_unknown_inline_C(instanceOopDesc* buffer, flatArrayOopDesc* array, int index, JavaThread* current)) 2261 JRT_BLOCK; 2262 array->write_value_to_flat_array(buffer, index, THREAD); 2263 if (HAS_PENDING_EXCEPTION) { 2264 fatal("This entry must be changed to be a non-leaf entry because writing to a flat array can now throw an exception"); 2265 } 2266 JRT_BLOCK_END; 2267 JRT_END 2268 2269 const TypeFunc* OptoRuntime::store_unknown_inline_Type() { 2270 // create input type (domain) 2271 const Type** fields = TypeTuple::fields(3); 2272 fields[TypeFunc::Parms] = TypeInstPtr::NOTNULL; 2273 fields[TypeFunc::Parms+1] = TypeOopPtr::NOTNULL; 2274 fields[TypeFunc::Parms+2] = TypeInt::POS; 2275 2276 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+3, fields); 2277 2278 // create result type (range) 2279 fields = TypeTuple::fields(0); 2280 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 2281 2282 return TypeFunc::make(domain, range); 2283 }
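// The TypeFunc descriptions above mirror the C entry points they belong to: each domain
// slot corresponds to one Java-visible argument of the runtime call, while the trailing
// JavaThread* of the JRT entry is implicit and never appears in the TypeFunc. A rough
// sketch of that correspondence for the flat-array load entry above (illustrative only):
//
//   // C entry : load_unknown_inline_C(flatArrayOopDesc* array, int index, JavaThread* current)
//   // domain  : Parms+0 = TypeOopPtr::NOTNULL (array), Parms+1 = TypeInt::POS (index)
//   // range   : Parms+0 = TypeInstPtr::NOTNULL (buffered value, delivered via set_vm_result)
//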