1 /* 2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "classfile/vmClasses.hpp" 26 #include "classfile/vmSymbols.hpp" 27 #include "code/codeCache.hpp" 28 #include "code/compiledIC.hpp" 29 #include "code/nmethod.hpp" 30 #include "code/pcDesc.hpp" 31 #include "code/scopeDesc.hpp" 32 #include "code/vtableStubs.hpp" 33 #include "compiler/compileBroker.hpp" 34 #include "compiler/oopMap.hpp" 35 #include "gc/g1/g1HeapRegion.hpp" 36 #include "gc/shared/barrierSet.hpp" 37 #include "gc/shared/collectedHeap.hpp" 38 #include "gc/shared/gcLocker.hpp" 39 #include "interpreter/bytecode.hpp" 40 #include "interpreter/interpreter.hpp" 41 #include "interpreter/linkResolver.hpp" 42 #include "logging/log.hpp" 43 #include "logging/logStream.hpp" 44 #include "memory/oopFactory.hpp" 45 #include "memory/resourceArea.hpp" 46 #include "oops/objArrayKlass.hpp" 47 #include "oops/klass.inline.hpp" 48 #include "oops/oop.inline.hpp" 49 #include "oops/typeArrayOop.inline.hpp" 50 #include "opto/ad.hpp" 51 #include "opto/addnode.hpp" 52 #include "opto/callnode.hpp" 53 #include "opto/cfgnode.hpp" 54 #include "opto/graphKit.hpp" 55 #include "opto/machnode.hpp" 56 #include "opto/matcher.hpp" 57 #include "opto/memnode.hpp" 58 #include "opto/mulnode.hpp" 59 #include "opto/output.hpp" 60 #include "opto/runtime.hpp" 61 #include "opto/subnode.hpp" 62 #include "prims/jvmtiExport.hpp" 63 #include "runtime/atomic.hpp" 64 #include "runtime/frame.inline.hpp" 65 #include "runtime/handles.inline.hpp" 66 #include "runtime/interfaceSupport.inline.hpp" 67 #include "runtime/javaCalls.hpp" 68 #include "runtime/sharedRuntime.hpp" 69 #include "runtime/signature.hpp" 70 #include "runtime/stackWatermarkSet.hpp" 71 #include "runtime/synchronizer.hpp" 72 #include "runtime/threadCritical.hpp" 73 #include "runtime/threadWXSetters.inline.hpp" 74 #include "runtime/vframe.hpp" 75 #include "runtime/vframeArray.hpp" 76 #include "runtime/vframe_hp.hpp" 77 #include "utilities/copy.hpp" 78 #include "utilities/preserveException.hpp" 79 80 81 // For debugging purposes: 82 // To force FullGCALot inside a runtime function, add the following two lines 83 // 84 // Universe::release_fullgc_alot_dummy(); 85 // Universe::heap()->collect(); 86 // 87 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000 88 89 90 #define C2_BLOB_FIELD_DEFINE(name, type) \ 91 type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr; 92 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java 93 #define 
C2_STUB_FIELD_DEFINE(name, f, t, r) \ 94 address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr; 95 #define C2_JVMTI_STUB_FIELD_DEFINE(name) \ 96 address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr; 97 C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE) 98 #undef C2_BLOB_FIELD_DEFINE 99 #undef C2_STUB_FIELD_DEFINE 100 #undef C2_JVMTI_STUB_FIELD_DEFINE 101 102 #define C2_BLOB_NAME_DEFINE(name, type) "C2 Runtime " # name "_blob", 103 #define C2_STUB_NAME_DEFINE(name, f, t, r) "C2 Runtime " # name, 104 #define C2_JVMTI_STUB_NAME_DEFINE(name) "C2 Runtime " # name, 105 const char* OptoRuntime::_stub_names[] = { 106 C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE) 107 }; 108 #undef C2_BLOB_NAME_DEFINE 109 #undef C2_STUB_NAME_DEFINE 110 #undef C2_JVMTI_STUB_NAME_DEFINE 111 112 // This should be called in an assertion at the start of OptoRuntime routines 113 // which are entered from compiled code (all of them) 114 #ifdef ASSERT 115 static bool check_compiled_frame(JavaThread* thread) { 116 assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code"); 117 RegisterMap map(thread, 118 RegisterMap::UpdateMap::skip, 119 RegisterMap::ProcessFrames::include, 120 RegisterMap::WalkContinuation::skip); 121 frame caller = thread->last_frame().sender(&map); 122 assert(caller.is_compiled_frame(), "not being called from compiled like code"); 123 return true; 124 } 125 #endif // ASSERT 126 127 /* 128 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \ 129 var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \ 130 if (var == nullptr) { return false; } 131 */ 132 133 #define GEN_C2_BLOB(name, type) \ 134 generate_ ## name ## _blob(); 135 136 // a few helper macros to conjure up generate_stub call arguments 137 #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java 138 #define C2_STUB_TYPEFUNC(name) name ## _Type 139 #define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C) 140 #define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id) 141 142 // Almost all the C functions targeted from the generated stubs are 143 // implemented locally to OptoRuntime with names that can be generated 144 // from the stub name by appending suffix '_C'. However, in two cases 145 // a common target method also needs to be called from shared runtime 146 // stubs. In these two cases the opto stubs rely on method 147 // imlementations defined in class SharedRuntime. The following 148 // defines temporarily rebind the generated names to reference the 149 // relevant implementations. 
150 151 #define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \ 152 C2_STUB_FIELD_NAME(name) = \ 153 generate_stub(env, \ 154 C2_STUB_TYPEFUNC(name), \ 155 C2_STUB_C_FUNC(name), \ 156 C2_STUB_NAME(name), \ 157 fancy_jump, \ 158 pass_tls, \ 159 pass_retpc); \ 160 if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \ 161 162 #define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name) 163 164 #define GEN_C2_JVMTI_STUB(name) \ 165 STUB_FIELD_NAME(name) = \ 166 generate_stub(env, \ 167 notify_jvmti_vthread_Type, \ 168 C2_JVMTI_STUB_C_FUNC(name), \ 169 C2_STUB_NAME(name), \ 170 0, \ 171 true, \ 172 false); \ 173 if (STUB_FIELD_NAME(name) == nullptr) { return false; } \ 174 175 bool OptoRuntime::generate(ciEnv* env) { 176 177 C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB) 178 179 return true; 180 } 181 182 #undef GEN_C2_BLOB 183 184 #undef C2_STUB_FIELD_NAME 185 #undef C2_STUB_TYPEFUNC 186 #undef C2_STUB_C_FUNC 187 #undef C2_STUB_NAME 188 #undef GEN_C2_STUB 189 190 #undef C2_JVMTI_STUB_C_FUNC 191 #undef GEN_C2_JVMTI_STUB 192 // #undef gen 193 194 const TypeFunc* OptoRuntime::_new_instance_Type = nullptr; 195 const TypeFunc* OptoRuntime::_new_array_Type = nullptr; 196 const TypeFunc* OptoRuntime::_multianewarray2_Type = nullptr; 197 const TypeFunc* OptoRuntime::_multianewarray3_Type = nullptr; 198 const TypeFunc* OptoRuntime::_multianewarray4_Type = nullptr; 199 const TypeFunc* OptoRuntime::_multianewarray5_Type = nullptr; 200 const TypeFunc* OptoRuntime::_multianewarrayN_Type = nullptr; 201 const TypeFunc* OptoRuntime::_complete_monitor_enter_Type = nullptr; 202 const TypeFunc* OptoRuntime::_complete_monitor_exit_Type = nullptr; 203 const TypeFunc* OptoRuntime::_monitor_notify_Type = nullptr; 204 const TypeFunc* OptoRuntime::_uncommon_trap_Type = nullptr; 205 const TypeFunc* OptoRuntime::_athrow_Type = nullptr; 206 const TypeFunc* OptoRuntime::_rethrow_Type = nullptr; 207 const TypeFunc* OptoRuntime::_Math_D_D_Type = nullptr; 208 const TypeFunc* OptoRuntime::_Math_DD_D_Type = nullptr; 209 const TypeFunc* OptoRuntime::_modf_Type = nullptr; 210 const TypeFunc* OptoRuntime::_l2f_Type = nullptr; 211 const TypeFunc* OptoRuntime::_void_long_Type = nullptr; 212 const TypeFunc* OptoRuntime::_void_void_Type = nullptr; 213 const TypeFunc* OptoRuntime::_jfr_write_checkpoint_Type = nullptr; 214 const TypeFunc* OptoRuntime::_flush_windows_Type = nullptr; 215 const TypeFunc* OptoRuntime::_fast_arraycopy_Type = nullptr; 216 const TypeFunc* OptoRuntime::_checkcast_arraycopy_Type = nullptr; 217 const TypeFunc* OptoRuntime::_generic_arraycopy_Type = nullptr; 218 const TypeFunc* OptoRuntime::_slow_arraycopy_Type = nullptr; 219 const TypeFunc* OptoRuntime::_unsafe_setmemory_Type = nullptr; 220 const TypeFunc* OptoRuntime::_array_fill_Type = nullptr; 221 const TypeFunc* OptoRuntime::_array_sort_Type = nullptr; 222 const TypeFunc* OptoRuntime::_array_partition_Type = nullptr; 223 const TypeFunc* OptoRuntime::_aescrypt_block_Type = nullptr; 224 const TypeFunc* OptoRuntime::_cipherBlockChaining_aescrypt_Type = nullptr; 225 const TypeFunc* OptoRuntime::_electronicCodeBook_aescrypt_Type = nullptr; 226 const TypeFunc* OptoRuntime::_counterMode_aescrypt_Type = nullptr; 227 const TypeFunc* OptoRuntime::_galoisCounterMode_aescrypt_Type = nullptr; 228 const TypeFunc* OptoRuntime::_digestBase_implCompress_with_sha3_Type = nullptr; 229 const TypeFunc* OptoRuntime::_digestBase_implCompress_without_sha3_Type = nullptr; 230 const TypeFunc* 
OptoRuntime::_digestBase_implCompressMB_with_sha3_Type = nullptr; 231 const TypeFunc* OptoRuntime::_digestBase_implCompressMB_without_sha3_Type = nullptr; 232 const TypeFunc* OptoRuntime::_multiplyToLen_Type = nullptr; 233 const TypeFunc* OptoRuntime::_montgomeryMultiply_Type = nullptr; 234 const TypeFunc* OptoRuntime::_montgomerySquare_Type = nullptr; 235 const TypeFunc* OptoRuntime::_squareToLen_Type = nullptr; 236 const TypeFunc* OptoRuntime::_mulAdd_Type = nullptr; 237 const TypeFunc* OptoRuntime::_bigIntegerShift_Type = nullptr; 238 const TypeFunc* OptoRuntime::_vectorizedMismatch_Type = nullptr; 239 const TypeFunc* OptoRuntime::_ghash_processBlocks_Type = nullptr; 240 const TypeFunc* OptoRuntime::_chacha20Block_Type = nullptr; 241 const TypeFunc* OptoRuntime::_base64_encodeBlock_Type = nullptr; 242 const TypeFunc* OptoRuntime::_base64_decodeBlock_Type = nullptr; 243 const TypeFunc* OptoRuntime::_string_IndexOf_Type = nullptr; 244 const TypeFunc* OptoRuntime::_poly1305_processBlocks_Type = nullptr; 245 const TypeFunc* OptoRuntime::_intpoly_montgomeryMult_P256_Type = nullptr; 246 const TypeFunc* OptoRuntime::_intpoly_assign_Type = nullptr; 247 const TypeFunc* OptoRuntime::_updateBytesCRC32_Type = nullptr; 248 const TypeFunc* OptoRuntime::_updateBytesCRC32C_Type = nullptr; 249 const TypeFunc* OptoRuntime::_updateBytesAdler32_Type = nullptr; 250 const TypeFunc* OptoRuntime::_osr_end_Type = nullptr; 251 const TypeFunc* OptoRuntime::_register_finalizer_Type = nullptr; 252 #if INCLUDE_JFR 253 const TypeFunc* OptoRuntime::_class_id_load_barrier_Type = nullptr; 254 #endif // INCLUDE_JFR 255 #if INCLUDE_JVMTI 256 const TypeFunc* OptoRuntime::_notify_jvmti_vthread_Type = nullptr; 257 #endif // INCLUDE_JVMTI 258 const TypeFunc* OptoRuntime::_dtrace_method_entry_exit_Type = nullptr; 259 const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr; 260 261 // Helper method to do generation of RunTimeStub's 262 address OptoRuntime::generate_stub(ciEnv* env, 263 TypeFunc_generator gen, address C_function, 264 const char *name, int is_fancy_jump, 265 bool pass_tls, 266 bool return_pc) { 267 268 // Matching the default directive, we currently have no method to match. 
269 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization)); 270 ResourceMark rm; 271 Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive); 272 DirectivesStack::release(directive); 273 return C.stub_entry_point(); 274 } 275 276 const char* OptoRuntime::stub_name(address entry) { 277 #ifndef PRODUCT 278 CodeBlob* cb = CodeCache::find_blob(entry); 279 RuntimeStub* rs =(RuntimeStub *)cb; 280 assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub"); 281 return rs->name(); 282 #else 283 // Fast implementation for product mode (maybe it should be inlined too) 284 return "runtime stub"; 285 #endif 286 } 287 288 // local methods passed as arguments to stub generator that forward 289 // control to corresponding JRT methods of SharedRuntime 290 291 void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos, 292 oopDesc* dest, jint dest_pos, 293 jint length, JavaThread* thread) { 294 SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread); 295 } 296 297 void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) { 298 SharedRuntime::complete_monitor_locking_C(obj, lock, current); 299 } 300 301 302 //============================================================================= 303 // Opto compiler runtime routines 304 //============================================================================= 305 306 307 //=============================allocation====================================== 308 // We failed the fast-path allocation. Now we need to do a scavenge or GC 309 // and try allocation again. 310 311 // object allocation 312 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current)) 313 JRT_BLOCK; 314 #ifndef PRODUCT 315 SharedRuntime::_new_instance_ctr++; // new instance requires GC 316 #endif 317 assert(check_compiled_frame(current), "incorrect caller"); 318 319 // These checks are cheap to make and support reflective allocation. 320 int lh = klass->layout_helper(); 321 if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) { 322 Handle holder(current, klass->klass_holder()); // keep the klass alive 323 klass->check_valid_for_instantiation(false, THREAD); 324 if (!HAS_PENDING_EXCEPTION) { 325 InstanceKlass::cast(klass)->initialize(THREAD); 326 } 327 } 328 329 if (!HAS_PENDING_EXCEPTION) { 330 // Scavenge and allocate an instance. 331 Handle holder(current, klass->klass_holder()); // keep the klass alive 332 oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD); 333 current->set_vm_result(result); 334 335 // Pass oops back through thread local storage. Our apparent type to Java 336 // is that we return an oop, but we can block on exit from this routine and 337 // a GC can trash the oop in C's return register. The generated stub will 338 // fetch the oop from TLS after any possible GC. 339 } 340 341 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 342 JRT_BLOCK_END; 343 344 // inform GC that we won't do card marks for initializing writes. 
345 SharedRuntime::on_slowpath_allocation_exit(current); 346 JRT_END 347 348 349 // array allocation 350 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current)) 351 JRT_BLOCK; 352 #ifndef PRODUCT 353 SharedRuntime::_new_array_ctr++; // new array requires GC 354 #endif 355 assert(check_compiled_frame(current), "incorrect caller"); 356 357 // Scavenge and allocate an instance. 358 oop result; 359 360 if (array_type->is_typeArray_klass()) { 361 // The oopFactory likes to work with the element type. 362 // (We could bypass the oopFactory, since it doesn't add much value.) 363 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); 364 result = oopFactory::new_typeArray(elem_type, len, THREAD); 365 } else { 366 // Although the oopFactory likes to work with the elem_type, 367 // the compiler prefers the array_type, since it must already have 368 // that latter value in hand for the fast path. 369 Handle holder(current, array_type->klass_holder()); // keep the array klass alive 370 Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass(); 371 result = oopFactory::new_objArray(elem_type, len, THREAD); 372 } 373 374 // Pass oops back through thread local storage. Our apparent type to Java 375 // is that we return an oop, but we can block on exit from this routine and 376 // a GC can trash the oop in C's return register. The generated stub will 377 // fetch the oop from TLS after any possible GC. 378 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 379 current->set_vm_result(result); 380 JRT_BLOCK_END; 381 382 // inform GC that we won't do card marks for initializing writes. 383 SharedRuntime::on_slowpath_allocation_exit(current); 384 JRT_END 385 386 // array allocation without zeroing 387 JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current)) 388 JRT_BLOCK; 389 #ifndef PRODUCT 390 SharedRuntime::_new_array_ctr++; // new array requires GC 391 #endif 392 assert(check_compiled_frame(current), "incorrect caller"); 393 394 // Scavenge and allocate an instance. 395 oop result; 396 397 assert(array_type->is_typeArray_klass(), "should be called only for type array"); 398 // The oopFactory likes to work with the element type. 399 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); 400 result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD); 401 402 // Pass oops back through thread local storage. Our apparent type to Java 403 // is that we return an oop, but we can block on exit from this routine and 404 // a GC can trash the oop in C's return register. The generated stub will 405 // fetch the oop from TLS after any possible GC. 406 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 407 current->set_vm_result(result); 408 JRT_BLOCK_END; 409 410 411 // inform GC that we won't do card marks for initializing writes. 412 SharedRuntime::on_slowpath_allocation_exit(current); 413 414 oop result = current->vm_result(); 415 if ((len > 0) && (result != nullptr) && 416 is_deoptimized_caller_frame(current)) { 417 // Zero array here if the caller is deoptimized. 
418 const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result); 419 BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type(); 420 size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type); 421 assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned"); 422 HeapWord* obj = cast_from_oop<HeapWord*>(result); 423 if (!is_aligned(hs_bytes, BytesPerLong)) { 424 *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0; 425 hs_bytes += BytesPerInt; 426 } 427 428 // Optimized zeroing. 429 assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned"); 430 const size_t aligned_hs = hs_bytes / BytesPerLong; 431 Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs); 432 } 433 434 JRT_END 435 436 // Note: multianewarray for one dimension is handled inline by GraphKit::new_array. 437 438 // multianewarray for 2 dimensions 439 JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current)) 440 #ifndef PRODUCT 441 SharedRuntime::_multi2_ctr++; // multianewarray for 1 dimension 442 #endif 443 assert(check_compiled_frame(current), "incorrect caller"); 444 assert(elem_type->is_klass(), "not a class"); 445 jint dims[2]; 446 dims[0] = len1; 447 dims[1] = len2; 448 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 449 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD); 450 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 451 current->set_vm_result(obj); 452 JRT_END 453 454 // multianewarray for 3 dimensions 455 JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current)) 456 #ifndef PRODUCT 457 SharedRuntime::_multi3_ctr++; // multianewarray for 1 dimension 458 #endif 459 assert(check_compiled_frame(current), "incorrect caller"); 460 assert(elem_type->is_klass(), "not a class"); 461 jint dims[3]; 462 dims[0] = len1; 463 dims[1] = len2; 464 dims[2] = len3; 465 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 466 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD); 467 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 468 current->set_vm_result(obj); 469 JRT_END 470 471 // multianewarray for 4 dimensions 472 JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current)) 473 #ifndef PRODUCT 474 SharedRuntime::_multi4_ctr++; // multianewarray for 1 dimension 475 #endif 476 assert(check_compiled_frame(current), "incorrect caller"); 477 assert(elem_type->is_klass(), "not a class"); 478 jint dims[4]; 479 dims[0] = len1; 480 dims[1] = len2; 481 dims[2] = len3; 482 dims[3] = len4; 483 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 484 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD); 485 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 486 current->set_vm_result(obj); 487 JRT_END 488 489 // multianewarray for 5 dimensions 490 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current)) 491 #ifndef PRODUCT 492 SharedRuntime::_multi5_ctr++; // multianewarray for 1 dimension 493 #endif 494 assert(check_compiled_frame(current), "incorrect caller"); 495 assert(elem_type->is_klass(), "not a class"); 496 jint dims[5]; 497 dims[0] = len1; 498 dims[1] = len2; 499 dims[2] = len3; 500 dims[3] = len4; 501 dims[4] = len5; 502 Handle holder(current, 
elem_type->klass_holder()); // keep the klass alive 503 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD); 504 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 505 current->set_vm_result(obj); 506 JRT_END 507 508 JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current)) 509 assert(check_compiled_frame(current), "incorrect caller"); 510 assert(elem_type->is_klass(), "not a class"); 511 assert(oop(dims)->is_typeArray(), "not an array"); 512 513 ResourceMark rm; 514 jint len = dims->length(); 515 assert(len > 0, "Dimensions array should contain data"); 516 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len); 517 ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0), 518 c_dims, len); 519 520 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 521 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD); 522 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 523 current->set_vm_result(obj); 524 JRT_END 525 526 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current)) 527 528 // Very few notify/notifyAll operations find any threads on the waitset, so 529 // the dominant fast-path is to simply return. 530 // Relatedly, it's critical that notify/notifyAll be fast in order to 531 // reduce lock hold times. 532 if (!SafepointSynchronize::is_synchronizing()) { 533 if (ObjectSynchronizer::quick_notify(obj, current, false)) { 534 return; 535 } 536 } 537 538 // This is the case the fast-path above isn't provisioned to handle. 539 // The fast-path is designed to handle frequently arising cases in an efficient manner. 540 // (The fast-path is just a degenerate variant of the slow-path). 541 // Perform the dreaded state transition and pass control into the slow-path. 542 JRT_BLOCK; 543 Handle h_obj(current, obj); 544 ObjectSynchronizer::notify(h_obj, CHECK); 545 JRT_BLOCK_END; 546 JRT_END 547 548 JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current)) 549 550 if (!SafepointSynchronize::is_synchronizing() ) { 551 if (ObjectSynchronizer::quick_notify(obj, current, true)) { 552 return; 553 } 554 } 555 556 // This is the case the fast-path above isn't provisioned to handle. 557 // The fast-path is designed to handle frequently arising cases in an efficient manner. 558 // (The fast-path is just a degenerate variant of the slow-path). 559 // Perform the dreaded state transition and pass control into the slow-path. 
560 JRT_BLOCK; 561 Handle h_obj(current, obj); 562 ObjectSynchronizer::notifyall(h_obj, CHECK); 563 JRT_BLOCK_END; 564 JRT_END 565 566 static const TypeFunc* make_new_instance_Type() { 567 // create input type (domain) 568 const Type **fields = TypeTuple::fields(1); 569 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated 570 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 571 572 // create result type (range) 573 fields = TypeTuple::fields(1); 574 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 575 576 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 577 578 return TypeFunc::make(domain, range); 579 } 580 581 #if INCLUDE_JVMTI 582 static const TypeFunc* make_notify_jvmti_vthread_Type() { 583 // create input type (domain) 584 const Type **fields = TypeTuple::fields(2); 585 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop 586 fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean 587 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 588 589 // no result type needed 590 fields = TypeTuple::fields(1); 591 fields[TypeFunc::Parms+0] = nullptr; // void 592 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 593 594 return TypeFunc::make(domain,range); 595 } 596 #endif 597 598 static const TypeFunc* make_athrow_Type() { 599 // create input type (domain) 600 const Type **fields = TypeTuple::fields(1); 601 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated 602 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 603 604 // create result type (range) 605 fields = TypeTuple::fields(0); 606 607 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 608 609 return TypeFunc::make(domain, range); 610 } 611 612 static const TypeFunc* make_new_array_Type() { 613 // create input type (domain) 614 const Type **fields = TypeTuple::fields(2); 615 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass 616 fields[TypeFunc::Parms+1] = TypeInt::INT; // array size 617 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 618 619 // create result type (range) 620 fields = TypeTuple::fields(1); 621 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 622 623 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 624 625 return TypeFunc::make(domain, range); 626 } 627 628 const TypeFunc* OptoRuntime::multianewarray_Type(int ndim) { 629 // create input type (domain) 630 const int nargs = ndim + 1; 631 const Type **fields = TypeTuple::fields(nargs); 632 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass 633 for( int i = 1; i < nargs; i++ ) 634 fields[TypeFunc::Parms + i] = TypeInt::INT; // array size 635 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields); 636 637 // create result type (range) 638 fields = TypeTuple::fields(1); 639 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 640 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 641 642 return TypeFunc::make(domain, range); 643 } 644 645 static const TypeFunc* make_multianewarrayN_Type() { 646 // create input type (domain) 647 const Type **fields = TypeTuple::fields(2); 648 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // element klass 649 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // array of dim sizes 650 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 651 652 // create result type (range) 653 fields = 
TypeTuple::fields(1); 654 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop 655 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 656 657 return TypeFunc::make(domain, range); 658 } 659 660 static const TypeFunc* make_uncommon_trap_Type() { 661 // create input type (domain) 662 const Type **fields = TypeTuple::fields(1); 663 fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action) 664 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 665 666 // create result type (range) 667 fields = TypeTuple::fields(0); 668 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 669 670 return TypeFunc::make(domain, range); 671 } 672 673 //----------------------------------------------------------------------------- 674 // Monitor Handling 675 676 static const TypeFunc* make_complete_monitor_enter_Type() { 677 // create input type (domain) 678 const Type **fields = TypeTuple::fields(2); 679 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 680 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock 681 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 682 683 // create result type (range) 684 fields = TypeTuple::fields(0); 685 686 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 687 688 return TypeFunc::make(domain,range); 689 } 690 691 //----------------------------------------------------------------------------- 692 693 static const TypeFunc* make_complete_monitor_exit_Type() { 694 // create input type (domain) 695 const Type **fields = TypeTuple::fields(3); 696 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 697 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock 698 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self) 699 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields); 700 701 // create result type (range) 702 fields = TypeTuple::fields(0); 703 704 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 705 706 return TypeFunc::make(domain, range); 707 } 708 709 static const TypeFunc* make_monitor_notify_Type() { 710 // create input type (domain) 711 const Type **fields = TypeTuple::fields(1); 712 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 713 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 714 715 // create result type (range) 716 fields = TypeTuple::fields(0); 717 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 718 return TypeFunc::make(domain, range); 719 } 720 721 static const TypeFunc* make_flush_windows_Type() { 722 // create input type (domain) 723 const Type** fields = TypeTuple::fields(1); 724 fields[TypeFunc::Parms+0] = nullptr; // void 725 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields); 726 727 // create result type 728 fields = TypeTuple::fields(1); 729 fields[TypeFunc::Parms+0] = nullptr; // void 730 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 731 732 return TypeFunc::make(domain, range); 733 } 734 735 static const TypeFunc* make_l2f_Type() { 736 // create input type (domain) 737 const Type **fields = TypeTuple::fields(2); 738 fields[TypeFunc::Parms+0] = TypeLong::LONG; 739 fields[TypeFunc::Parms+1] = Type::HALF; 740 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 741 742 // create result type (range) 743 fields = TypeTuple::fields(1); 744 
fields[TypeFunc::Parms+0] = Type::FLOAT; 745 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 746 747 return TypeFunc::make(domain, range); 748 } 749 750 static const TypeFunc* make_modf_Type() { 751 const Type **fields = TypeTuple::fields(2); 752 fields[TypeFunc::Parms+0] = Type::FLOAT; 753 fields[TypeFunc::Parms+1] = Type::FLOAT; 754 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 755 756 // create result type (range) 757 fields = TypeTuple::fields(1); 758 fields[TypeFunc::Parms+0] = Type::FLOAT; 759 760 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 761 762 return TypeFunc::make(domain, range); 763 } 764 765 static const TypeFunc* make_Math_D_D_Type() { 766 // create input type (domain) 767 const Type **fields = TypeTuple::fields(2); 768 // Symbol* name of class to be loaded 769 fields[TypeFunc::Parms+0] = Type::DOUBLE; 770 fields[TypeFunc::Parms+1] = Type::HALF; 771 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 772 773 // create result type (range) 774 fields = TypeTuple::fields(2); 775 fields[TypeFunc::Parms+0] = Type::DOUBLE; 776 fields[TypeFunc::Parms+1] = Type::HALF; 777 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 778 779 return TypeFunc::make(domain, range); 780 } 781 782 const TypeFunc* OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) { 783 // create input type (domain) 784 const Type **fields = TypeTuple::fields(num_arg); 785 // Symbol* name of class to be loaded 786 assert(num_arg > 0, "must have at least 1 input"); 787 for (uint i = 0; i < num_arg; i++) { 788 fields[TypeFunc::Parms+i] = in_type; 789 } 790 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields); 791 792 // create result type (range) 793 const uint num_ret = 1; 794 fields = TypeTuple::fields(num_ret); 795 fields[TypeFunc::Parms+0] = out_type; 796 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields); 797 798 return TypeFunc::make(domain, range); 799 } 800 801 static const TypeFunc* make_Math_DD_D_Type() { 802 const Type **fields = TypeTuple::fields(4); 803 fields[TypeFunc::Parms+0] = Type::DOUBLE; 804 fields[TypeFunc::Parms+1] = Type::HALF; 805 fields[TypeFunc::Parms+2] = Type::DOUBLE; 806 fields[TypeFunc::Parms+3] = Type::HALF; 807 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields); 808 809 // create result type (range) 810 fields = TypeTuple::fields(2); 811 fields[TypeFunc::Parms+0] = Type::DOUBLE; 812 fields[TypeFunc::Parms+1] = Type::HALF; 813 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 814 815 return TypeFunc::make(domain, range); 816 } 817 818 //-------------- currentTimeMillis, currentTimeNanos, etc 819 820 static const TypeFunc* make_void_long_Type() { 821 // create input type (domain) 822 const Type **fields = TypeTuple::fields(0); 823 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); 824 825 // create result type (range) 826 fields = TypeTuple::fields(2); 827 fields[TypeFunc::Parms+0] = TypeLong::LONG; 828 fields[TypeFunc::Parms+1] = Type::HALF; 829 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 830 831 return TypeFunc::make(domain, range); 832 } 833 834 static const TypeFunc* make_void_void_Type() { 835 // create input type (domain) 836 const Type **fields = TypeTuple::fields(0); 837 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); 838 839 // create result type (range) 840 fields = 
TypeTuple::fields(0); 841 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 842 return TypeFunc::make(domain, range); 843 } 844 845 static const TypeFunc* make_jfr_write_checkpoint_Type() { 846 // create input type (domain) 847 const Type **fields = TypeTuple::fields(0); 848 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields); 849 850 // create result type (range) 851 fields = TypeTuple::fields(0); 852 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 853 return TypeFunc::make(domain, range); 854 } 855 856 857 // Takes as parameters: 858 // void *dest 859 // long size 860 // uchar byte 861 862 static const TypeFunc* make_setmemory_Type() { 863 // create input type (domain) 864 int argcnt = NOT_LP64(3) LP64_ONLY(4); 865 const Type** fields = TypeTuple::fields(argcnt); 866 int argp = TypeFunc::Parms; 867 fields[argp++] = TypePtr::NOTNULL; // dest 868 fields[argp++] = TypeX_X; // size 869 LP64_ONLY(fields[argp++] = Type::HALF); // size 870 fields[argp++] = TypeInt::UBYTE; // bytevalue 871 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 872 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 873 874 // no result type needed 875 fields = TypeTuple::fields(1); 876 fields[TypeFunc::Parms+0] = nullptr; // void 877 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 878 return TypeFunc::make(domain, range); 879 } 880 881 // arraycopy stub variations: 882 enum ArrayCopyType { 883 ac_fast, // void(ptr, ptr, size_t) 884 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr) 885 ac_slow, // void(ptr, int, ptr, int, int) 886 ac_generic // int(ptr, int, ptr, int, int) 887 }; 888 889 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) { 890 // create input type (domain) 891 int num_args = (act == ac_fast ? 3 : 5); 892 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0); 893 int argcnt = num_args; 894 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths 895 const Type** fields = TypeTuple::fields(argcnt); 896 int argp = TypeFunc::Parms; 897 fields[argp++] = TypePtr::NOTNULL; // src 898 if (num_size_args == 0) { 899 fields[argp++] = TypeInt::INT; // src_pos 900 } 901 fields[argp++] = TypePtr::NOTNULL; // dest 902 if (num_size_args == 0) { 903 fields[argp++] = TypeInt::INT; // dest_pos 904 fields[argp++] = TypeInt::INT; // length 905 } 906 while (num_size_args-- > 0) { 907 fields[argp++] = TypeX_X; // size in whatevers (size_t) 908 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length 909 } 910 if (act == ac_checkcast) { 911 fields[argp++] = TypePtr::NOTNULL; // super_klass 912 } 913 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act"); 914 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 915 916 // create result type if needed 917 int retcnt = (act == ac_checkcast || act == ac_generic ? 
1 : 0); 918 fields = TypeTuple::fields(1); 919 if (retcnt == 0) 920 fields[TypeFunc::Parms+0] = nullptr; // void 921 else 922 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed 923 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields); 924 return TypeFunc::make(domain, range); 925 } 926 927 static const TypeFunc* make_array_fill_Type() { 928 const Type** fields; 929 int argp = TypeFunc::Parms; 930 // create input type (domain): pointer, int, size_t 931 fields = TypeTuple::fields(3 LP64_ONLY( + 1)); 932 fields[argp++] = TypePtr::NOTNULL; 933 fields[argp++] = TypeInt::INT; 934 fields[argp++] = TypeX_X; // size in whatevers (size_t) 935 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length 936 const TypeTuple *domain = TypeTuple::make(argp, fields); 937 938 // create result type 939 fields = TypeTuple::fields(1); 940 fields[TypeFunc::Parms+0] = nullptr; // void 941 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 942 943 return TypeFunc::make(domain, range); 944 } 945 946 static const TypeFunc* make_array_partition_Type() { 947 // create input type (domain) 948 int num_args = 7; 949 int argcnt = num_args; 950 const Type** fields = TypeTuple::fields(argcnt); 951 int argp = TypeFunc::Parms; 952 fields[argp++] = TypePtr::NOTNULL; // array 953 fields[argp++] = TypeInt::INT; // element type 954 fields[argp++] = TypeInt::INT; // low 955 fields[argp++] = TypeInt::INT; // end 956 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array) 957 fields[argp++] = TypeInt::INT; // indexPivot1 958 fields[argp++] = TypeInt::INT; // indexPivot2 959 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 960 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 961 962 // no result type needed 963 fields = TypeTuple::fields(1); 964 fields[TypeFunc::Parms+0] = nullptr; // void 965 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 966 return TypeFunc::make(domain, range); 967 } 968 969 static const TypeFunc* make_array_sort_Type() { 970 // create input type (domain) 971 int num_args = 4; 972 int argcnt = num_args; 973 const Type** fields = TypeTuple::fields(argcnt); 974 int argp = TypeFunc::Parms; 975 fields[argp++] = TypePtr::NOTNULL; // array 976 fields[argp++] = TypeInt::INT; // element type 977 fields[argp++] = TypeInt::INT; // fromIndex 978 fields[argp++] = TypeInt::INT; // toIndex 979 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 980 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 981 982 // no result type needed 983 fields = TypeTuple::fields(1); 984 fields[TypeFunc::Parms+0] = nullptr; // void 985 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 986 return TypeFunc::make(domain, range); 987 } 988 989 static const TypeFunc* make_aescrypt_block_Type() { 990 // create input type (domain) 991 int num_args = 3; 992 int argcnt = num_args; 993 const Type** fields = TypeTuple::fields(argcnt); 994 int argp = TypeFunc::Parms; 995 fields[argp++] = TypePtr::NOTNULL; // src 996 fields[argp++] = TypePtr::NOTNULL; // dest 997 fields[argp++] = TypePtr::NOTNULL; // k array 998 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 999 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1000 1001 // no result type needed 1002 fields = TypeTuple::fields(1); 1003 fields[TypeFunc::Parms+0] = nullptr; // void 1004 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1005 return TypeFunc::make(domain, range); 
1006 } 1007 1008 static const TypeFunc* make_updateBytesCRC32_Type() { 1009 // create input type (domain) 1010 int num_args = 3; 1011 int argcnt = num_args; 1012 const Type** fields = TypeTuple::fields(argcnt); 1013 int argp = TypeFunc::Parms; 1014 fields[argp++] = TypeInt::INT; // crc 1015 fields[argp++] = TypePtr::NOTNULL; // src 1016 fields[argp++] = TypeInt::INT; // len 1017 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1018 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1019 1020 // result type needed 1021 fields = TypeTuple::fields(1); 1022 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 1023 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1024 return TypeFunc::make(domain, range); 1025 } 1026 1027 static const TypeFunc* make_updateBytesCRC32C_Type() { 1028 // create input type (domain) 1029 int num_args = 4; 1030 int argcnt = num_args; 1031 const Type** fields = TypeTuple::fields(argcnt); 1032 int argp = TypeFunc::Parms; 1033 fields[argp++] = TypeInt::INT; // crc 1034 fields[argp++] = TypePtr::NOTNULL; // buf 1035 fields[argp++] = TypeInt::INT; // len 1036 fields[argp++] = TypePtr::NOTNULL; // table 1037 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1038 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1039 1040 // result type needed 1041 fields = TypeTuple::fields(1); 1042 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 1043 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1044 return TypeFunc::make(domain, range); 1045 } 1046 1047 static const TypeFunc* make_updateBytesAdler32_Type() { 1048 // create input type (domain) 1049 int num_args = 3; 1050 int argcnt = num_args; 1051 const Type** fields = TypeTuple::fields(argcnt); 1052 int argp = TypeFunc::Parms; 1053 fields[argp++] = TypeInt::INT; // crc 1054 fields[argp++] = TypePtr::NOTNULL; // src + offset 1055 fields[argp++] = TypeInt::INT; // len 1056 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1057 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1058 1059 // result type needed 1060 fields = TypeTuple::fields(1); 1061 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 1062 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1063 return TypeFunc::make(domain, range); 1064 } 1065 1066 static const TypeFunc* make_cipherBlockChaining_aescrypt_Type() { 1067 // create input type (domain) 1068 int num_args = 5; 1069 int argcnt = num_args; 1070 const Type** fields = TypeTuple::fields(argcnt); 1071 int argp = TypeFunc::Parms; 1072 fields[argp++] = TypePtr::NOTNULL; // src 1073 fields[argp++] = TypePtr::NOTNULL; // dest 1074 fields[argp++] = TypePtr::NOTNULL; // k array 1075 fields[argp++] = TypePtr::NOTNULL; // r array 1076 fields[argp++] = TypeInt::INT; // src len 1077 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1078 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1079 1080 // returning cipher len (int) 1081 fields = TypeTuple::fields(1); 1082 fields[TypeFunc::Parms+0] = TypeInt::INT; 1083 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1084 return TypeFunc::make(domain, range); 1085 } 1086 1087 static const TypeFunc* make_electronicCodeBook_aescrypt_Type() { 1088 // create input type (domain) 1089 int num_args = 4; 1090 int argcnt = num_args; 1091 const Type** fields = TypeTuple::fields(argcnt); 1092 int argp = TypeFunc::Parms; 1093 fields[argp++] = TypePtr::NOTNULL; // src 
1094 fields[argp++] = TypePtr::NOTNULL; // dest 1095 fields[argp++] = TypePtr::NOTNULL; // k array 1096 fields[argp++] = TypeInt::INT; // src len 1097 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1098 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1099 1100 // returning cipher len (int) 1101 fields = TypeTuple::fields(1); 1102 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1103 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1104 return TypeFunc::make(domain, range); 1105 } 1106 1107 static const TypeFunc* make_counterMode_aescrypt_Type() { 1108 // create input type (domain) 1109 int num_args = 7; 1110 int argcnt = num_args; 1111 const Type** fields = TypeTuple::fields(argcnt); 1112 int argp = TypeFunc::Parms; 1113 fields[argp++] = TypePtr::NOTNULL; // src 1114 fields[argp++] = TypePtr::NOTNULL; // dest 1115 fields[argp++] = TypePtr::NOTNULL; // k array 1116 fields[argp++] = TypePtr::NOTNULL; // counter array 1117 fields[argp++] = TypeInt::INT; // src len 1118 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter 1119 fields[argp++] = TypePtr::NOTNULL; // saved used addr 1120 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1121 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1122 // returning cipher len (int) 1123 fields = TypeTuple::fields(1); 1124 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1125 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1126 return TypeFunc::make(domain, range); 1127 } 1128 1129 static const TypeFunc* make_galoisCounterMode_aescrypt_Type() { 1130 // create input type (domain) 1131 int num_args = 8; 1132 int argcnt = num_args; 1133 const Type** fields = TypeTuple::fields(argcnt); 1134 int argp = TypeFunc::Parms; 1135 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs 1136 fields[argp++] = TypeInt::INT; // int len 1137 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs 1138 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs 1139 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj 1140 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj 1141 fields[argp++] = TypePtr::NOTNULL; // long[] subkeyHtbl from GHASH obj 1142 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj 1143 1144 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1145 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1146 // returning cipher len (int) 1147 fields = TypeTuple::fields(1); 1148 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1149 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1150 return TypeFunc::make(domain, range); 1151 } 1152 1153 static const TypeFunc* make_digestBase_implCompress_Type(bool is_sha3) { 1154 // create input type (domain) 1155 int num_args = is_sha3 ? 
3 : 2; 1156 int argcnt = num_args; 1157 const Type** fields = TypeTuple::fields(argcnt); 1158 int argp = TypeFunc::Parms; 1159 fields[argp++] = TypePtr::NOTNULL; // buf 1160 fields[argp++] = TypePtr::NOTNULL; // state 1161 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size 1162 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1163 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1164 1165 // no result type needed 1166 fields = TypeTuple::fields(1); 1167 fields[TypeFunc::Parms+0] = nullptr; // void 1168 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1169 return TypeFunc::make(domain, range); 1170 } 1171 1172 static const TypeFunc* make_digestBase_implCompressMB_Type(bool is_sha3) { 1173 // create input type (domain) 1174 int num_args = is_sha3 ? 5 : 4; 1175 int argcnt = num_args; 1176 const Type** fields = TypeTuple::fields(argcnt); 1177 int argp = TypeFunc::Parms; 1178 fields[argp++] = TypePtr::NOTNULL; // buf 1179 fields[argp++] = TypePtr::NOTNULL; // state 1180 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size 1181 fields[argp++] = TypeInt::INT; // ofs 1182 fields[argp++] = TypeInt::INT; // limit 1183 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1184 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1185 1186 // returning ofs (int) 1187 fields = TypeTuple::fields(1); 1188 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs 1189 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1190 return TypeFunc::make(domain, range); 1191 } 1192 1193 static const TypeFunc* make_multiplyToLen_Type() { 1194 // create input type (domain) 1195 int num_args = 5; 1196 int argcnt = num_args; 1197 const Type** fields = TypeTuple::fields(argcnt); 1198 int argp = TypeFunc::Parms; 1199 fields[argp++] = TypePtr::NOTNULL; // x 1200 fields[argp++] = TypeInt::INT; // xlen 1201 fields[argp++] = TypePtr::NOTNULL; // y 1202 fields[argp++] = TypeInt::INT; // ylen 1203 fields[argp++] = TypePtr::NOTNULL; // z 1204 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1205 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1206 1207 // no result type needed 1208 fields = TypeTuple::fields(1); 1209 fields[TypeFunc::Parms+0] = nullptr; 1210 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1211 return TypeFunc::make(domain, range); 1212 } 1213 1214 static const TypeFunc* make_squareToLen_Type() { 1215 // create input type (domain) 1216 int num_args = 4; 1217 int argcnt = num_args; 1218 const Type** fields = TypeTuple::fields(argcnt); 1219 int argp = TypeFunc::Parms; 1220 fields[argp++] = TypePtr::NOTNULL; // x 1221 fields[argp++] = TypeInt::INT; // len 1222 fields[argp++] = TypePtr::NOTNULL; // z 1223 fields[argp++] = TypeInt::INT; // zlen 1224 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1225 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1226 1227 // no result type needed 1228 fields = TypeTuple::fields(1); 1229 fields[TypeFunc::Parms+0] = nullptr; 1230 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1231 return TypeFunc::make(domain, range); 1232 } 1233 1234 static const TypeFunc* make_mulAdd_Type() { 1235 // create input type (domain) 1236 int num_args = 5; 1237 int argcnt = num_args; 1238 const Type** fields = TypeTuple::fields(argcnt); 1239 int argp = TypeFunc::Parms; 1240 fields[argp++] = TypePtr::NOTNULL; // out 1241 fields[argp++] = TypePtr::NOTNULL; // in 1242 fields[argp++] = 
TypeInt::INT; // offset 1243 fields[argp++] = TypeInt::INT; // len 1244 fields[argp++] = TypeInt::INT; // k 1245 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1246 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1247 1248 // returning carry (int) 1249 fields = TypeTuple::fields(1); 1250 fields[TypeFunc::Parms+0] = TypeInt::INT; 1251 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1252 return TypeFunc::make(domain, range); 1253 } 1254 1255 static const TypeFunc* make_montgomeryMultiply_Type() { 1256 // create input type (domain) 1257 int num_args = 7; 1258 int argcnt = num_args; 1259 const Type** fields = TypeTuple::fields(argcnt); 1260 int argp = TypeFunc::Parms; 1261 fields[argp++] = TypePtr::NOTNULL; // a 1262 fields[argp++] = TypePtr::NOTNULL; // b 1263 fields[argp++] = TypePtr::NOTNULL; // n 1264 fields[argp++] = TypeInt::INT; // len 1265 fields[argp++] = TypeLong::LONG; // inv 1266 fields[argp++] = Type::HALF; 1267 fields[argp++] = TypePtr::NOTNULL; // result 1268 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1269 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1270 1271 // result type needed 1272 fields = TypeTuple::fields(1); 1273 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1274 1275 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1276 return TypeFunc::make(domain, range); 1277 } 1278 1279 static const TypeFunc* make_montgomerySquare_Type() { 1280 // create input type (domain) 1281 int num_args = 6; 1282 int argcnt = num_args; 1283 const Type** fields = TypeTuple::fields(argcnt); 1284 int argp = TypeFunc::Parms; 1285 fields[argp++] = TypePtr::NOTNULL; // a 1286 fields[argp++] = TypePtr::NOTNULL; // n 1287 fields[argp++] = TypeInt::INT; // len 1288 fields[argp++] = TypeLong::LONG; // inv 1289 fields[argp++] = Type::HALF; 1290 fields[argp++] = TypePtr::NOTNULL; // result 1291 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1292 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1293 1294 // result type needed 1295 fields = TypeTuple::fields(1); 1296 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1297 1298 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1299 return TypeFunc::make(domain, range); 1300 } 1301 1302 static const TypeFunc* make_bigIntegerShift_Type() { 1303 int argcnt = 5; 1304 const Type** fields = TypeTuple::fields(argcnt); 1305 int argp = TypeFunc::Parms; 1306 fields[argp++] = TypePtr::NOTNULL; // newArr 1307 fields[argp++] = TypePtr::NOTNULL; // oldArr 1308 fields[argp++] = TypeInt::INT; // newIdx 1309 fields[argp++] = TypeInt::INT; // shiftCount 1310 fields[argp++] = TypeInt::INT; // numIter 1311 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1312 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1313 1314 // no result type needed 1315 fields = TypeTuple::fields(1); 1316 fields[TypeFunc::Parms + 0] = nullptr; 1317 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1318 return TypeFunc::make(domain, range); 1319 } 1320 1321 static const TypeFunc* make_vectorizedMismatch_Type() { 1322 // create input type (domain) 1323 int num_args = 4; 1324 int argcnt = num_args; 1325 const Type** fields = TypeTuple::fields(argcnt); 1326 int argp = TypeFunc::Parms; 1327 fields[argp++] = TypePtr::NOTNULL; // obja 1328 fields[argp++] = TypePtr::NOTNULL; // objb 1329 fields[argp++] = TypeInt::INT; // length, number of elements 1330 fields[argp++] = 
TypeInt::INT; // log2scale, element size 1331 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1332 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1333 1334 //return mismatch index (int) 1335 fields = TypeTuple::fields(1); 1336 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1337 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1338 return TypeFunc::make(domain, range); 1339 } 1340 1341 static const TypeFunc* make_ghash_processBlocks_Type() { 1342 int argcnt = 4; 1343 1344 const Type** fields = TypeTuple::fields(argcnt); 1345 int argp = TypeFunc::Parms; 1346 fields[argp++] = TypePtr::NOTNULL; // state 1347 fields[argp++] = TypePtr::NOTNULL; // subkeyH 1348 fields[argp++] = TypePtr::NOTNULL; // data 1349 fields[argp++] = TypeInt::INT; // blocks 1350 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1351 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1352 1353 // result type needed 1354 fields = TypeTuple::fields(1); 1355 fields[TypeFunc::Parms+0] = nullptr; // void 1356 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1357 return TypeFunc::make(domain, range); 1358 } 1359 1360 static const TypeFunc* make_chacha20Block_Type() { 1361 int argcnt = 2; 1362 1363 const Type** fields = TypeTuple::fields(argcnt); 1364 int argp = TypeFunc::Parms; 1365 fields[argp++] = TypePtr::NOTNULL; // state 1366 fields[argp++] = TypePtr::NOTNULL; // result 1367 1368 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1369 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1370 1371 // result type needed 1372 fields = TypeTuple::fields(1); 1373 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int 1374 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1375 return TypeFunc::make(domain, range); 1376 } 1377 1378 static const TypeFunc* make_base64_encodeBlock_Type() { 1379 int argcnt = 6; 1380 1381 const Type** fields = TypeTuple::fields(argcnt); 1382 int argp = TypeFunc::Parms; 1383 fields[argp++] = TypePtr::NOTNULL; // src array 1384 fields[argp++] = TypeInt::INT; // offset 1385 fields[argp++] = TypeInt::INT; // length 1386 fields[argp++] = TypePtr::NOTNULL; // dest array 1387 fields[argp++] = TypeInt::INT; // dp 1388 fields[argp++] = TypeInt::BOOL; // isURL 1389 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1390 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1391 1392 // result type needed 1393 fields = TypeTuple::fields(1); 1394 fields[TypeFunc::Parms + 0] = nullptr; // void 1395 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1396 return TypeFunc::make(domain, range); 1397 } 1398 1399 static const TypeFunc* make_string_IndexOf_Type() { 1400 int argcnt = 4; 1401 1402 const Type** fields = TypeTuple::fields(argcnt); 1403 int argp = TypeFunc::Parms; 1404 fields[argp++] = TypePtr::NOTNULL; // haystack array 1405 fields[argp++] = TypeInt::INT; // haystack length 1406 fields[argp++] = TypePtr::NOTNULL; // needle array 1407 fields[argp++] = TypeInt::INT; // needle length 1408 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1409 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1410 1411 // result type needed 1412 fields = TypeTuple::fields(1); 1413 fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack 1414 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1415 return 
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_base64_decodeBlock_Type() {
  int argcnt = 7;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // src offset
  fields[argp++] = TypeInt::INT;        // src length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dest offset
  fields[argp++] = TypeInt::BOOL;       // isURL
  fields[argp++] = TypeInt::BOOL;       // isMIME
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_poly1305_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // input array
  fields[argp++] = TypeInt::INT;        // input length
  fields[argp++] = TypePtr::NOTNULL;    // accumulator array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_intpoly_montgomeryMult_P256_Type() {
  int argcnt = 3;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a array
  fields[argp++] = TypePtr::NOTNULL;    // b array
  fields[argp++] = TypePtr::NOTNULL;    // r(esult) array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_intpoly_assign_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // set flag
  fields[argp++] = TypePtr::NOTNULL;    // a array (result)
  fields[argp++] = TypePtr::NOTNULL;    // b array (if set is set)
  fields[argp++] = TypeInt::INT;        // array length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//------------- Interpreter state for on stack replacement
static const TypeFunc* make_osr_end_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type
  fields = TypeTuple::fields(1);
  // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//-------------------------------------------------------------------------------------
// register policy

bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
  assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
  switch (register_save_policy[reg]) {
    case 'C': return false; // SOC
    case 'E': return true;  // SOE
    case 'N': return false; // NS
    case 'A': return false; // AS
  }
  ShouldNotReachHere();
  return false;
}

//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// This method is an entry point that is always called by a C++ method, not
// directly from compiled code. Compiled code calls the C++ method that follows.
// We cannot allow an async exception to be installed during exception processing.
JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling. DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs check this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to the old paradigm: search for the exception handler in caller_frame
    // instead of in the exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages. If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert(handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and no other exception occurred during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception, pc, handler_address);
        }
      } else {
#ifdef ASSERT
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore correct return pc. Was saved above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob.
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note we enter without the usual JRT wrapper. We will call a helper routine that
// will do the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been,
// we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* current) {
  //
  // We are in Java, not the VM, and in debug mode we have a NoHandleMark
  //
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  debug_only(NoHandleMark __hm;)
  nmethod* nm = nullptr;
  address handler_address = nullptr;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(current, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != nullptr) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::skip,
                    RegisterMap::WalkContinuation::skip);
    frame caller = current->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}

//------------------------------rethrow----------------------------------------
// We get here after compiled code has executed a 'RethrowNode'. The callee
// is either throwing or rethrowing an exception. The callee-save registers
// have been restored, synchronized objects have been unlocked and the callee
// stack frame has been removed. The return address was passed in.
// Exception oop is passed as the 1st argument. This routine is then called
// from the stub. On exit, we know where to jump in the caller's code.
// After this C code exits, the stub will pop its frame and end in a jump
// (instead of a return). We enter the caller's default handler.
//
// This must be JRT_LEAF:
//     - caller will not change its state as we cannot block on exit,
//       therefore raw_exception_handler_for_return_address is all it takes
//       to handle deoptimized blobs
//
// However, there needs to be a safepoint check in the middle! So compiled
// safepoints are completely watertight.
//
// Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
//
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
//
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
  // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
  AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));

#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++;               // count rethrows
#endif
  assert(exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  thread->set_vm_result(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}

static const TypeFunc* make_rethrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}


void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
  // Deoptimize the caller before continuing, as the compiled
  // exception handler table may not be valid.
  if (!StressCompiledExceptionHandlers && doit) {
    deoptimize_caller_frame(thread);
  }
}

void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // Deoptimize the caller frame.
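  // Descriptive note (not in the original source): marking the caller frame
  // for deoptimization means it will be unpacked into interpreter frames when
  // control returns to it, so its (possibly stale) compiled exception handler
  // table is never consulted.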
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
}


bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  return caller_frame.is_deoptimized_frame();
}

static const TypeFunc* make_register_finalizer_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // oop; Receiver
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JFR
static const TypeFunc* make_class_id_load_barrier_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);

  return TypeFunc::make(domain, range);
}
#endif // INCLUDE_JFR

//-----------------------------------------------------------------------------
static const TypeFunc* make_dtrace_method_entry_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;      // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

static const TypeFunc* make_dtrace_object_alloc_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;   // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

//-----------------------------------------------------------------------------
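// Descriptive note (not in the original source): the named counters below
// record lock and eliminated-lock events. They live on a singly linked list
// headed by _named_counters; counters are only ever prepended (see
// new_named_counter), which is why the unlocked traversal in
// print_named_counters is safe.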

NamedCounter * volatile OptoRuntime::_named_counters = nullptr;

//
// dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}

//
// Allocate a new NamedCounter. The JVMState is used to generate the
// name which consists of method@line for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    if (bci < 0) bci = 0;
    if (m != nullptr) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c = new NamedCounter(st.freeze(), tag);

  // atomically add the new counter to the head of the list. We only
  // add counters so this is safe.
  NamedCounter* head;
  do {
    c->set_next(nullptr);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}

void OptoRuntime::initialize_types() {
  _new_instance_Type = make_new_instance_Type();
  _new_array_Type = make_new_array_Type();
  _multianewarray2_Type = multianewarray_Type(2);
  _multianewarray3_Type = multianewarray_Type(3);
  _multianewarray4_Type = multianewarray_Type(4);
  _multianewarray5_Type = multianewarray_Type(5);
  _multianewarrayN_Type = make_multianewarrayN_Type();
  _complete_monitor_enter_Type = make_complete_monitor_enter_Type();
  _complete_monitor_exit_Type = make_complete_monitor_exit_Type();
  _monitor_notify_Type = make_monitor_notify_Type();
  _uncommon_trap_Type = make_uncommon_trap_Type();
  _athrow_Type = make_athrow_Type();
  _rethrow_Type = make_rethrow_Type();
  _Math_D_D_Type = make_Math_D_D_Type();
  _Math_DD_D_Type = make_Math_DD_D_Type();
  _modf_Type = make_modf_Type();
  _l2f_Type = make_l2f_Type();
  _void_long_Type = make_void_long_Type();
  _void_void_Type = make_void_void_Type();
  _jfr_write_checkpoint_Type = make_jfr_write_checkpoint_Type();
  _flush_windows_Type = make_flush_windows_Type();
  _fast_arraycopy_Type = make_arraycopy_Type(ac_fast);
  _checkcast_arraycopy_Type = make_arraycopy_Type(ac_checkcast);
  _generic_arraycopy_Type = make_arraycopy_Type(ac_generic);
  _slow_arraycopy_Type = make_arraycopy_Type(ac_slow);
  _unsafe_setmemory_Type = make_setmemory_Type();
  _array_fill_Type = make_array_fill_Type();
  _array_sort_Type = make_array_sort_Type();
  _array_partition_Type = make_array_partition_Type();
  _aescrypt_block_Type = make_aescrypt_block_Type();
  _cipherBlockChaining_aescrypt_Type = make_cipherBlockChaining_aescrypt_Type();
  _electronicCodeBook_aescrypt_Type = make_electronicCodeBook_aescrypt_Type();
  _counterMode_aescrypt_Type = make_counterMode_aescrypt_Type();
  _galoisCounterMode_aescrypt_Type = make_galoisCounterMode_aescrypt_Type();
  _digestBase_implCompress_with_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ true);
  _digestBase_implCompress_without_sha3_Type = make_digestBase_implCompress_Type( /* is_sha3= */ false);
  _digestBase_implCompressMB_with_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ true);
  _digestBase_implCompressMB_without_sha3_Type = make_digestBase_implCompressMB_Type(/* is_sha3= */ false);
  _multiplyToLen_Type = make_multiplyToLen_Type();
  _montgomeryMultiply_Type = make_montgomeryMultiply_Type();
  _montgomerySquare_Type = make_montgomerySquare_Type();
  _squareToLen_Type = make_squareToLen_Type();
  _mulAdd_Type = make_mulAdd_Type();
  _bigIntegerShift_Type = make_bigIntegerShift_Type();
  _vectorizedMismatch_Type = make_vectorizedMismatch_Type();
  _ghash_processBlocks_Type = make_ghash_processBlocks_Type();
  _chacha20Block_Type = make_chacha20Block_Type();
  _base64_encodeBlock_Type = make_base64_encodeBlock_Type();
  _base64_decodeBlock_Type = make_base64_decodeBlock_Type();
  _string_IndexOf_Type = make_string_IndexOf_Type();
  _poly1305_processBlocks_Type = make_poly1305_processBlocks_Type();
  _intpoly_montgomeryMult_P256_Type = make_intpoly_montgomeryMult_P256_Type();
  _intpoly_assign_Type = make_intpoly_assign_Type();
  _updateBytesCRC32_Type = make_updateBytesCRC32_Type();
  _updateBytesCRC32C_Type = make_updateBytesCRC32C_Type();
  _updateBytesAdler32_Type = make_updateBytesAdler32_Type();
  _osr_end_Type = make_osr_end_Type();
  _register_finalizer_Type = make_register_finalizer_Type();
  JFR_ONLY(
    _class_id_load_barrier_Type = make_class_id_load_barrier_Type();
  )
#if INCLUDE_JVMTI
  _notify_jvmti_vthread_Type = make_notify_jvmti_vthread_Type();
#endif // INCLUDE_JVMTI
  _dtrace_method_entry_exit_Type = make_dtrace_method_entry_exit_Type();
  _dtrace_object_alloc_Type = make_dtrace_object_alloc_Type();
}

int trace_exception_counter = 0;
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_nmethod()) {
    blob->as_nmethod()->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.freeze());
}
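
// Descriptive note (not in the original source): when exception logging is
// enabled, a line emitted via trace_exception() has roughly this shape
// (illustrative values only):
//
//   12 [Exception (): a 'java.lang.ArithmeticException' in Foo::bar at 0x00007f3a...]
//
// i.e. the running trace_exception_counter, the msg tag, the exception oop's
// print_value output, the enclosing nmethod (or <runtime-stub> / <unknown>),
// and the faulting pc.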