/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/output.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
//  To force FullGCALot inside a runtime function, add the following two lines
//
//  Universe::release_fullgc_alot_dummy();
//  Universe::heap()->collect();
//
//  At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000

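// C2_STUBS_DO applies its first macro argument to each C2 runtime blob, its
// second to each C2 runtime stub and its third to each JVMTI notification
// stub. It is used three times in this file: to define the stub fields, to
// build the stub name table, and to generate the stubs themselves.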
#define C2_BLOB_FIELD_DEFINE(name, type) \
  type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
#define C2_JVMTI_STUB_FIELD_DEFINE(name) \
  address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
#undef C2_BLOB_FIELD_DEFINE
#undef C2_STUB_FIELD_DEFINE
#undef C2_JVMTI_STUB_FIELD_DEFINE


#define C2_BLOB_NAME_DEFINE(name, type)  "C2 Runtime " # name "_blob",
#define C2_STUB_NAME_DEFINE(name, f, t, r)  "C2 Runtime " # name,
#define C2_JVMTI_STUB_NAME_DEFINE(name)  "C2 Runtime " # name,
const char* OptoRuntime::_stub_names[] = {
  C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
};
#undef C2_BLOB_NAME_DEFINE
#undef C2_STUB_NAME_DEFINE
#undef C2_JVMTI_STUB_NAME_DEFINE

address OptoRuntime::_vtable_must_compile_Java = nullptr;

PerfCounter* _perf_OptoRuntime_class_init_barrier_redundant_count = nullptr;

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled-like code");
  return true;
}
#endif // ASSERT

/*
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
  if (var == nullptr) { return false; }
*/

#define GEN_C2_BLOB(name, type) \
  generate_ ## name ## _blob();

// a few helper macros to conjure up generate_stub call arguments
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_TYPEFUNC(name) name ## _Type
#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id)

// Almost all the C functions targeted from the generated stubs are
// implemented locally to OptoRuntime with names that can be generated
// from the stub name by appending suffix '_C'. However, in two cases
// a common target method also needs to be called from shared runtime
// stubs. In these two cases the opto stubs rely on method
// implementations defined in class SharedRuntime. The following
// defines temporarily rebind the generated names to reference the
// relevant implementations.
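
// As an illustration (the real stub list comes from C2_STUBS_DO), for a stub
// named new_instance the helper macros above make GEN_C2_STUB below expand
// roughly to:
//
//   _new_instance_Java =
//     generate_stub(env,
//                   new_instance_Type,
//                   CAST_FROM_FN_PTR(address, new_instance_C),
//                   stub_name(OptoStubId::new_instance_id),
//                   fancy_jump, pass_tls, pass_retpc);
//   if (_new_instance_Java == nullptr) { return false; }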

#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc )  \
  C2_STUB_FIELD_NAME(name) =                                  \
    generate_stub(env,                                        \
                  C2_STUB_TYPEFUNC(name),                     \
                  C2_STUB_C_FUNC(name),                       \
                  C2_STUB_NAME(name),                         \
                  fancy_jump,                                 \
                  pass_tls,                                   \
                  pass_retpc);                                \
  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; }  \

#define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)

#define GEN_C2_JVMTI_STUB(name)                               \
  STUB_FIELD_NAME(name) =                                     \
    generate_stub(env,                                        \
                  notify_jvmti_vthread_Type,                  \
                  C2_JVMTI_STUB_C_FUNC(name),                 \
                  C2_STUB_NAME(name),                         \
                  0,                                          \
                  true,                                       \
                  false);                                     \
  if (STUB_FIELD_NAME(name) == nullptr) { return false; }     \

bool OptoRuntime::generate(ciEnv* env) {
  init_counters();

  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)

  return true;
}

#undef GEN_C2_BLOB

#undef C2_STUB_FIELD_NAME
#undef C2_STUB_TYPEFUNC
#undef C2_STUB_C_FUNC
#undef C2_STUB_NAME
#undef GEN_C2_STUB

#undef C2_JVMTI_STUB_C_FUNC
#undef GEN_C2_JVMTI_STUB
// #undef gen

const TypeFunc* OptoRuntime::_new_instance_Type = nullptr;
const TypeFunc* OptoRuntime::_new_array_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr;
const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr;
const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr;
const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr;
const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr;
const TypeFunc* OptoRuntime::_athrow_Type = nullptr;
const TypeFunc* OptoRuntime::_rethrow_Type = nullptr;
const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr;
const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr;
const TypeFunc* OptoRuntime::_modf_Type = nullptr;
const TypeFunc* OptoRuntime::_l2f_Type = nullptr;
const TypeFunc* OptoRuntime::_void_long_Type = nullptr;
const TypeFunc* OptoRuntime::_void_void_Type = nullptr;
const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr;
const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr;
const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr;
const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr;
const TypeFunc* OptoRuntime::_array_fill_Type = nullptr;
const TypeFunc* OptoRuntime::_array_sort_Type = nullptr;
const TypeFunc* OptoRuntime::_array_partition_Type = nullptr;
const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr;
const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr;
const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr;
const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr;
const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr;
const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr;
const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr;
const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr;
const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr;
const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr;
const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr;
const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr;
const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr;
const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr;
const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr;
const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr;
const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr;
const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr;
const TypeFunc* OptoRuntime::_osr_end_Type = nullptr;
const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr;
#if INCLUDE_JFR
const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr;
#endif // INCLUDE_JFR
#if INCLUDE_JVMTI
const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type = nullptr;
#endif // INCLUDE_JVMTI
const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr;
const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr;

// Helper method to do generation of RunTimeStub's
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen, address C_function,
                                   const char *name, int is_fancy_jump,
                                   bool pass_tls,
                                   bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompilerThread::current()->compiler());
  ResourceMark rm;
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs = (RuntimeStub *)cb;
  assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}

// local methods passed as arguments to stub generator that forward
// control to corresponding JRT methods of SharedRuntime

void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
                                   oopDesc* dest, jint dest_pos,
                                   jint length, JavaThread* thread) {
  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
}

void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_instance_C, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    current->set_vm_result(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END


// array allocation
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_C, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    // Although the oopFactory likes to work with the elem_type,
    // the compiler prefers the array_type, since it must already have
    // that latter value in hand for the fast path.
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
    result = oopFactory::new_objArray(elem_type, len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_nozero_C, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  oop result = current->vm_result();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
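    // The zeroing starts at the array header size (the element base offset).
    // That offset is only guaranteed to be 4-byte aligned, so when it is not
    // 8-byte aligned a single jint is cleared first; the remainder of the
    // array is then filled as 8-byte-aligned words.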
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.

// multianewarray for 2 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray2_C, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray3_C, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray4_C, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

JRT_ENTRY_PROF(void, OptoRuntime, multianewarrayN_C, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notify_C, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notifyAll_C, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))

  if (!SafepointSynchronize::is_synchronizing() ) {
    if (ObjectSynchronizer::quick_notify(obj, current, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

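// Convention followed by the TypeFunc factories below: argument and result
// slots are indexed from TypeFunc::Parms, since the lower tuple slots are
// reserved for the standard control, I/O, memory, frame pointer and return
// address edges. 64-bit values (long/double) occupy two consecutive slots,
// the second typed Type::HALF; a void result is a tuple with no slots past
// TypeFunc::Parms.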
static const TypeFunc* make_new_instance_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JVMTI
static const TypeFunc* make_notify_jvmti_vthread_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // VirtualThread oop
  fields[TypeFunc::Parms+1] = TypeInt::BOOL;         // jboolean
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain,range);
}
#endif

static const TypeFunc* make_athrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_new_array_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;          // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type **fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  for( int i = 1; i < nargs; i++ )
    fields[TypeFunc::Parms + i] = TypeInt::INT;      // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_multianewarrayN_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // array of dim sizes
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_uncommon_trap_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling

static const TypeFunc* make_complete_monitor_enter_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

  return TypeFunc::make(domain,range);
}

//-----------------------------------------------------------------------------

static const TypeFunc* make_complete_monitor_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(3);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock - BasicLock
  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM;    // Thread pointer (Self)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_monitor_notify_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_flush_windows_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_l2f_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_modf_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  fields[TypeFunc::Parms+1] = Type::FLOAT;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_Math_D_D_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  // Symbol* name of class to be loaded
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(num_arg);
  // Symbol* name of class to be loaded
  assert(num_arg > 0, "must have at least 1 input");
  for (uint i = 0; i < num_arg; i++) {
    fields[TypeFunc::Parms+i] = in_type;
  }
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);

  // create result type (range)
  const uint num_ret = 1;
  fields = TypeTuple::fields(num_ret);
  fields[TypeFunc::Parms+0] = out_type;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_Math_DD_D_Type() {
  const Type **fields = TypeTuple::fields(4);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  fields[TypeFunc::Parms+2] = Type::DOUBLE;
  fields[TypeFunc::Parms+3] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

//-------------- currentTimeMillis, currentTimeNanos, etc

static const TypeFunc* make_void_long_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_void_void_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_jfr_write_checkpoint_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}


// Takes as parameters:
//    void *dest
//    long size
//    uchar byte

static const TypeFunc* make_setmemory_Type() {
  // create input type (domain)
  int argcnt = NOT_LP64(3) LP64_ONLY(4);
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;        // dest
  fields[argp++] = TypeX_X;                 // size
  LP64_ONLY(fields[argp++] = Type::HALF);   // size
  fields[argp++] = TypeInt::UBYTE;          // bytevalue
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// arraycopy stub variations:
enum ArrayCopyType {
  ac_fast,                      // void(ptr, ptr, size_t)
  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
  ac_slow,                      // void(ptr, int, ptr, int, int)
  ac_generic                    //  int(ptr, int, ptr, int, int)
};

static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args      = (act == ac_fast ? 3 : 5);
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;        // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;          // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL;        // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;          // dest_pos
    fields[argp++] = TypeInt::INT;          // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL;      // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = nullptr; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  const TypeTuple *domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_partition_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // array
  fields[argp++] = TypeInt::INT;        // element type
  fields[argp++] = TypeInt::INT;        // low
  fields[argp++] = TypeInt::INT;        // end
  fields[argp++] = TypePtr::NOTNULL;    // pivot_indices (int array)
  fields[argp++] = TypeInt::INT;        // indexPivot1
  fields[argp++] = TypeInt::INT;        // indexPivot2
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_array_sort_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // array
  fields[argp++] = TypeInt::INT;        // element type
  fields[argp++] = TypeInt::INT;        // fromIndex
  fields[argp++] = TypeInt::INT;        // toIndex
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_aescrypt_block_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;  // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesCRC32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesCRC32C_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // buf
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // table
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // src + offset
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  fields[argp++] = TypeInt::INT;        // src len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_electronicCodeBook_aescrypt_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypeInt::INT;        // src len
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_counterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypePtr::NOTNULL;    // counter array
  fields[argp++] = TypeInt::INT;        // src len
  fields[argp++] = TypePtr::NOTNULL;    // saved_encCounter
  fields[argp++] = TypePtr::NOTNULL;    // saved used addr
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_galoisCounterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 8;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // byte[] in + inOfs
  fields[argp++] = TypeInt::INT;        // int len
  fields[argp++] = TypePtr::NOTNULL;    // byte[] ct + ctOfs
  fields[argp++] = TypePtr::NOTNULL;    // byte[] out + outOfs
  fields[argp++] = TypePtr::NOTNULL;    // byte[] key from AESCrypt obj
  fields[argp++] = TypePtr::NOTNULL;    // long[] state from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;    // long[] subkeyHtbl from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;    // byte[] counter from GCTR obj

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 3 : 2;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;            // buf
  fields[argp++] = TypePtr::NOTNULL;            // state
  if (is_sha3) fields[argp++] = TypeInt::INT;   // block_size
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 5 : 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;            // buf
  fields[argp++] = TypePtr::NOTNULL;            // state
  if (is_sha3) fields[argp++] = TypeInt::INT;   // block_size
  fields[argp++] = TypeInt::INT;                // ofs
  fields[argp++] = TypeInt::INT;                // limit
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning ofs (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_multiplyToLen_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // xlen
  fields[argp++] = TypePtr::NOTNULL;    // y
  fields[argp++] = TypeInt::INT;        // ylen
  fields[argp++] = TypePtr::NOTNULL;    // z
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // z
  fields[argp++] = TypeInt::INT;        // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_mulAdd_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // out
  fields[argp++] = TypePtr::NOTNULL;    // in
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeInt::INT;        // k
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning carry (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // b
  fields[argp++] = TypePtr::NOTNULL;    // n
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeLong::LONG;      // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;    // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // n
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeLong::LONG;      // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;    // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_bigIntegerShift_Type() {
  int argcnt = 5;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // newArr
  fields[argp++] = TypePtr::NOTNULL;    // oldArr
  fields[argp++] = TypeInt::INT;        // newIdx
  fields[argp++] = TypeInt::INT;        // shiftCount
  fields[argp++] = TypeInt::INT;        // numIter
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_vectorizedMismatch_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // obja
  fields[argp++] = TypePtr::NOTNULL;    // objb
  fields[argp++] = TypeInt::INT;        // length, number of elements
  fields[argp++] = TypeInt::INT;        // log2scale, element size
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // return mismatch index (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_ghash_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // state
  fields[argp++] = TypePtr::NOTNULL;    // subkeyH
  fields[argp++] = TypePtr::NOTNULL;    // data
  fields[argp++] = TypeInt::INT;        // blocks
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_chacha20Block_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // state
  fields[argp++] = TypePtr::NOTNULL;    // result

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_base64_encodeBlock_Type() {
  int argcnt = 6;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dp
  fields[argp++] = TypeInt::BOOL;       // isURL
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_string_IndexOf_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // haystack array
  fields[argp++] = TypeInt::INT;        // haystack length
  fields[argp++] = TypePtr::NOTNULL;    // needle array
  fields[argp++] = TypeInt::INT;        // needle length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
static const TypeFunc* make_string_IndexOf_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // haystack array
  fields[argp++] = TypeInt::INT;        // haystack length
  fields[argp++] = TypePtr::NOTNULL;    // needle array
  fields[argp++] = TypeInt::INT;        // needle length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_base64_decodeBlock_Type() {
  int argcnt = 7;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // src offset
  fields[argp++] = TypeInt::INT;        // src length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dest offset
  fields[argp++] = TypeInt::BOOL;       // isURL
  fields[argp++] = TypeInt::BOOL;       // isMIME
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_poly1305_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // input array
  fields[argp++] = TypeInt::INT;        // input length
  fields[argp++] = TypePtr::NOTNULL;    // accumulator array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
  int argcnt = 3;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a array
  fields[argp++] = TypePtr::NOTNULL;    // b array
  fields[argp++] = TypePtr::NOTNULL;    // r(esult) array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_intpoly_assign_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // set flag
  fields[argp++] = TypePtr::NOTNULL;    // a array (result)
  fields[argp++] = TypePtr::NOTNULL;    // b array (if set is set)
  fields[argp++] = TypeInt::INT;        // array length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//------------- Interpreter state for on stack replacement
static const TypeFunc* make_osr_end_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type
  fields = TypeTuple::fields(1);
  // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
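// Added note on the OSR signature above: the single raw-pointer argument is
// the temporary migration buffer into which the interpreter frame's state was
// packed before control jumps into the OSR-compiled frame; the migration-end
// stub consumes and releases that buffer, hence the void result.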
//-------------------------------------------------------------------------------------
// register policy

bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
  assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
  switch (register_save_policy[reg]) {
    case 'C': return false; //SOC
    case 'E': return true;  //SOE
    case 'N': return false; //NS
    case 'A': return false; //AS
  }
  ShouldNotReachHere();
  return false;
}
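// Added commentary: the save-policy letters come from the platform .ad file
// and are conventionally read as SOC = save-on-call (caller-saved),
// SOE = save-on-entry (callee-saved), NS = not saved, and AS = always saved.
// Only SOE registers answer true above.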
//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// The method is an entry that is always called by a C++ method not
// directly from compiled code. Compiled code will call the C++ method following.
// We can't allow async exception to be installed during exception processing.
JRT_ENTRY_NO_ASYNC_PROF(address, OptoRuntime, handle_exception_C_helper, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling. DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs check this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to old paradigm: search for exception handler in caller_frame
    // instead of in the exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages. If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert(handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and there didn't happen another exception during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception, pc, handler_address);
        }
      } else {
#ifdef ASSERT
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore correct return pc. Was saved above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note we enter without the usual JRT wrapper. We will call a helper routine that
// will do the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been
// we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* current) {
  //
  // We are in Java not VM and in debug mode we have a NoHandleMark
  //
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  debug_only(NoHandleMark __hm;)
  nmethod* nm = nullptr;
  address handler_address = nullptr;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(current, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != nullptr) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::skip,
                    RegisterMap::WalkContinuation::skip);
    frame caller = current->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}

//------------------------------rethrow----------------------------------------
// We get here after compiled code has executed a 'RethrowNode'. The callee
// is either throwing or rethrowing an exception. The callee-save registers
// have been restored, synchronized objects have been unlocked and the callee
// stack frame has been removed. The return address was passed in.
// Exception oop is passed as the 1st argument. This routine is then called
// from the stub. On exit, we know where to jump in the caller's code.
// After this C code exits, the stub will pop its frame and end in a jump
// (instead of a return). We enter the caller's default handler.
//
// This must be JRT_LEAF:
//     - caller will not change its state as we cannot block on exit,
//       therefore raw_exception_handler_for_return_address is all it takes
//       to handle deoptimized blobs
//
// However, there needs to be a safepoint check in the middle! So compiled
// safepoints are completely watertight.
//
// Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
//
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
//
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
  // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
  AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));

#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++;               // count rethrows
#endif
  assert(exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  thread->set_vm_result(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}

static const TypeFunc* make_rethrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}


void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
  // Deoptimize the caller before continuing, as the compiled
  // exception handler table may not be valid.
  if (!StressCompiledExceptionHandlers && doit) {
    deoptimize_caller_frame(thread);
  }
}

void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
}


bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  return caller_frame.is_deoptimized_frame();
}

static const TypeFunc* make_register_finalizer_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::class_init_barrier_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeKlassPtr::NOTNULL;
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

#if INCLUDE_JFR
static const TypeFunc* make_class_id_load_barrier_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);

  return TypeFunc::make(domain, range);
}
#endif // INCLUDE_JFR

//-----------------------------------------------------------------------------
// runtime upcall support
const TypeFunc *OptoRuntime::runtime_up_call_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
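// Added note: the dtrace method-entry and method-exit probes share the
// signature built below (a thread-local-storage pointer plus the Method*
// being entered or left), which is why a single TypeFunc serves both probes.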
static const TypeFunc* make_dtrace_method_entry_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;      // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_dtrace_object_alloc_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;      // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;    // oop; newly allocated object

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, register_finalizer_C, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, class_init_barrier_C, OptoRuntime::class_init_barrier_C(Klass* k, JavaThread* current))
  InstanceKlass* ik = InstanceKlass::cast(k);
  if (ik->should_be_initialized()) {
    ik->initialize(CHECK);
  } else if (UsePerfData) {
    _perf_OptoRuntime_class_init_barrier_redundant_count->inc();
  }
JRT_END
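// Added note: a class-init barrier call is "redundant" when the target class
// turns out to be fully initialized by the time the barrier runs, so there is
// nothing left to do; the counter above only records how often that happens
// (and only when UsePerfData is on), it does not change behavior.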
//-----------------------------------------------------------------------------

NamedCounter * volatile OptoRuntime::_named_counters = nullptr;

//
// dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}

//
// Allocate a new NamedCounter. The JVMState is used to generate the
// name, which consists of method@bci entries for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    if (bci < 0) bci = 0;
    if (m != nullptr) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c = new NamedCounter(st.freeze(), tag);

  // atomically add the new counter to the head of the list. We only
  // add counters so this is safe.
  NamedCounter* head;
  do {
    c->set_next(nullptr);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}
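// Added note: the resulting counter name is a space-separated chain of
// "holder.method@bci" entries, one per scope of the inlining tree, e.g.
// something of the form "Foo.bar@12 Baz.qux@3" (names made up for
// illustration).  The cmpxchg loop above pushes the counter onto the global
// list lock-free; counters are never removed, which is what makes the simple
// head-insertion safe.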
void OptoRuntime::initialize_types() {
  _new_instance_Type = make_new_instance_Type();
  _new_array_Type = make_new_array_Type();
  _multianewarray2_Type = multianewarray_Type(2);
  _multianewarray3_Type = multianewarray_Type(3);
  _multianewarray4_Type = multianewarray_Type(4);
  _multianewarray5_Type = multianewarray_Type(5);
  _multianewarrayN_Type = make_multianewarrayN_Type();
  _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
  _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
  _monitor_notify_Type = make_monitor_notify_Type();
  _uncommon_trap_Type = make_uncommon_trap_Type();
  _athrow_Type = make_athrow_Type();
  _rethrow_Type = make_rethrow_Type();
  _Math_D_D_Type = make_Math_D_D_Type();
  _Math_DD_D_Type = make_Math_DD_D_Type();
  _modf_Type = make_modf_Type();
  _l2f_Type = make_l2f_Type();
  _void_long_Type = make_void_long_Type();
  _void_void_Type = make_void_void_Type();
  _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
  _flush_windows_Type = make_flush_windows_Type();
  _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
  _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
  _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
  _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
  _unsafe_setmemory_Type = make_setmemory_Type();
  _array_fill_Type = make_array_fill_Type();
  _array_sort_Type = make_array_sort_Type();
  _array_partition_Type = make_array_partition_Type();
  _aescrypt_block_Type = make_aescrypt_block_Type();
  _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
  _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
  _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
  _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
  _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type(/* is_sha3= */ true);
  _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type(/* is_sha3= */ false);
  _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
  _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
  _multiplyToLen_Type = make_multiplyToLen_Type();
  _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
  _montgomerySquare_Type = make_montgomerySquare_Type();
  _squareToLen_Type = make_squareToLen_Type();
  _mulAdd_Type = make_mulAdd_Type();
  _bigIntegerShift_Type = make_bigIntegerShift_Type();
  _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
  _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
  _chacha20Block_Type = make_chacha20Block_Type();
  _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
  _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
  _string_IndexOf_Type = make_string_IndexOf_Type();
  _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
  _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
  _intpoly_assign_Type = make_intpoly_assign_Type();
  _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
  _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
  _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
  _osr_end_Type = make_osr_end_Type();
  _register_finalizer_Type = make_register_finalizer_Type();
  JFR_ONLY(
    _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
  )
#if INCLUDE_JVMTI
  _notify_jvmti_vthread_Type = make_notify_jvmti_vthread_Type();
#endif // INCLUDE_JVMTI
  _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
  _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
}

int trace_exception_counter = 0;
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_nmethod()) {
    blob->as_nmethod()->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.freeze());
}

#define DO_COUNTERS2(macro2, macro1) \
  macro2(OptoRuntime, new_instance_C) \
  macro2(OptoRuntime, new_array_C) \
  macro2(OptoRuntime, new_array_nozero_C) \
  macro2(OptoRuntime, multianewarray2_C) \
  macro2(OptoRuntime, multianewarray3_C) \
  macro2(OptoRuntime, multianewarray4_C) \
  macro2(OptoRuntime, multianewarrayN_C) \
  macro2(OptoRuntime, monitor_notify_C) \
  macro2(OptoRuntime, monitor_notifyAll_C) \
  macro2(OptoRuntime, handle_exception_C_helper) \
  macro2(OptoRuntime, register_finalizer_C) \
  macro2(OptoRuntime, class_init_barrier_C) \
  macro1(OptoRuntime, class_init_barrier_redundant)

#define INIT_COUNTER_TIME_AND_CNT(sub, name) \
  NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

#define INIT_COUNTER_CNT(sub, name) \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

void OptoRuntime::init_counters() {
  assert(CompilerConfig::is_c2_enabled(), "");

  if (UsePerfData) {
    EXCEPTION_MARK;

    DO_COUNTERS2(INIT_COUNTER_TIME_AND_CNT, INIT_COUNTER_CNT)

    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("jvm_perf_init failed unexpectedly");
    }
  }
}
#undef INIT_COUNTER_TIME_AND_CNT
#undef INIT_COUNTER_CNT
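// Added note: DO_COUNTERS2 expands macro2 for runtime entries that get both a
// tick timer and an event counter, and macro1 for count-only entries
// (currently just the redundant class-init-barrier counter).  The printing
// macros below follow the same split and only emit a line when the counter
// has actually fired.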
#define PRINT_COUNTER_TIME_AND_CNT(sub, name) { \
  jlong count = _perf_##sub##_##name##_count->get_value(); \
  if (count > 0) { \
    st->print_cr(" %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
                 _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
                 _perf_##sub##_##name##_timer->thread_counter_value_us(), \
                 count); \
  }}

#define PRINT_COUNTER_CNT(sub, name) { \
  jlong count = _perf_##sub##_##name##_count->get_value(); \
  if (count > 0) { \
    st->print_cr(" %-30s = " JLONG_FORMAT_W(5) " events", #name, count); \
  }}

void OptoRuntime::print_counters_on(outputStream* st) {
  if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c2_enabled()) {
    DO_COUNTERS2(PRINT_COUNTER_TIME_AND_CNT, PRINT_COUNTER_CNT)
  } else {
    st->print_cr(" OptoRuntime: no info (%s is disabled)",
                 (!CompilerConfig::is_c2_enabled() ? "C2" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
  }
}

#undef PRINT_COUNTER_TIME_AND_CNT
#undef PRINT_COUNTER_CNT
#undef DO_COUNTERS2