/*
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/output.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
//  To force FullGCALot inside a runtime function, add the following two lines
//
//  Universe::release_fullgc_alot_dummy();
//  Universe::heap()->collect();
//
//  At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000

#define C2_BLOB_FIELD_DEFINE(name, type) \
  type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
#define C2_JVMTI_STUB_FIELD_DEFINE(name) \
  address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
#undef C2_BLOB_FIELD_DEFINE
#undef C2_STUB_FIELD_DEFINE
#undef C2_JVMTI_STUB_FIELD_DEFINE


#define C2_BLOB_NAME_DEFINE(name, type) "C2 Runtime " # name "_blob",
#define C2_STUB_NAME_DEFINE(name, f, t, r) "C2 Runtime " # name,
#define C2_JVMTI_STUB_NAME_DEFINE(name) "C2 Runtime " # name,
const char* OptoRuntime::_stub_names[] = {
  C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
};
#undef C2_BLOB_NAME_DEFINE
#undef C2_STUB_NAME_DEFINE
#undef C2_JVMTI_STUB_NAME_DEFINE

address OptoRuntime::_vtable_must_compile_Java = nullptr;

PerfCounter* _perf_OptoRuntime_class_init_barrier_redundant_count = nullptr;

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled-like code");
  return true;
}
#endif // ASSERT

/*
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
  if (var == nullptr) { return false; }
*/

#define GEN_C2_BLOB(name, type) \
  generate_ ## name ## _blob();

// a few helper macros to conjure up generate_stub call arguments
#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
#define C2_STUB_TYPEFUNC(name) name ## _Type
#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id)

// Almost all the C functions targeted from the generated stubs are
// implemented locally to OptoRuntime with names that can be generated
// from the stub name by appending suffix '_C'. However, in two cases
// a common target method also needs to be called from shared runtime
// stubs. In these two cases the opto stubs rely on method
// implementations defined in class SharedRuntime. The following
// defines temporarily rebind the generated names to reference the
// relevant implementations.
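
// Illustration only (hand expansion, not part of the generated code): for a
// stub named new_instance the helper macros above resolve roughly as follows,
//
//   C2_STUB_FIELD_NAME(new_instance)  ->  _new_instance_Java
//   C2_STUB_TYPEFUNC(new_instance)    ->  new_instance_Type
//   C2_STUB_C_FUNC(new_instance)      ->  CAST_FROM_FN_PTR(address, new_instance_C)
//   C2_STUB_NAME(new_instance)        ->  stub_name(OptoStubId::new_instance_id)
//
// so GEN_C2_STUB(new_instance, ...) below ends up storing the generated stub's
// entry point in OptoRuntime::_new_instance_Java (assuming new_instance is one
// of the stubs enumerated by C2_STUBS_DO).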

#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
  C2_STUB_FIELD_NAME(name) = \
    generate_stub(env, \
                  C2_STUB_TYPEFUNC(name), \
                  C2_STUB_C_FUNC(name), \
                  C2_STUB_NAME(name), \
                  fancy_jump, \
                  pass_tls, \
                  pass_retpc); \
  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \

#define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)

#define GEN_C2_JVMTI_STUB(name) \
  STUB_FIELD_NAME(name) = \
    generate_stub(env, \
                  notify_jvmti_vthread_Type, \
                  C2_JVMTI_STUB_C_FUNC(name), \
                  C2_STUB_NAME(name), \
                  0, \
                  true, \
                  false); \
  if (STUB_FIELD_NAME(name) == nullptr) { return false; } \

bool OptoRuntime::generate(ciEnv* env) {
  init_counters();

  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)

  return true;
}

#undef GEN_C2_BLOB

#undef C2_STUB_FIELD_NAME
#undef C2_STUB_TYPEFUNC
#undef C2_STUB_C_FUNC
#undef C2_STUB_NAME
#undef GEN_C2_STUB

#undef C2_JVMTI_STUB_C_FUNC
#undef GEN_C2_JVMTI_STUB
// #undef gen


// Helper method to do generation of RunTimeStub's
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen,
                                   address C_function,
                                   const char* name,
                                   int is_fancy_jump,
                                   bool pass_tls,
                                   bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompilerThread::current()->compiler());
  ResourceMark rm;
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs = (RuntimeStub*)cb;
  assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}

// local methods passed as arguments to stub generator that forward
// control to corresponding JRT methods of SharedRuntime

void OptoRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
                                   oopDesc* dest, jint dest_pos,
                                   jint length, JavaThread* thread) {
  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
}

void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_instance_C, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    current->set_vm_result(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END


// array allocation
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_C, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    // Although the oopFactory likes to work with the elem_type,
    // the compiler prefers the array_type, since it must already have
    // that latter value in hand for the fast path.
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
    result = oopFactory::new_objArray(elem_type, len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_nozero_C, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  oop result = current->vm_result();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.

// multianewarray for 2 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray2_C, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray3_C, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray4_C, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(current), "incorrect caller");
"incorrect caller"); 422 assert(elem_type->is_klass(), "not a class"); 423 jint dims[4]; 424 dims[0] = len1; 425 dims[1] = len2; 426 dims[2] = len3; 427 dims[3] = len4; 428 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 429 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD); 430 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 431 current->set_vm_result(obj); 432 JRT_END 433 434 // multianewarray for 5 dimensions 435 JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current)) 436 #ifndef PRODUCT 437 SharedRuntime::_multi5_ctr++; // multianewarray for 1 dimension 438 #endif 439 assert(check_compiled_frame(current), "incorrect caller"); 440 assert(elem_type->is_klass(), "not a class"); 441 jint dims[5]; 442 dims[0] = len1; 443 dims[1] = len2; 444 dims[2] = len3; 445 dims[3] = len4; 446 dims[4] = len5; 447 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 448 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD); 449 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 450 current->set_vm_result(obj); 451 JRT_END 452 453 JRT_ENTRY_PROF(void, OptoRuntime, multianewarrayN_C, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current)) 454 assert(check_compiled_frame(current), "incorrect caller"); 455 assert(elem_type->is_klass(), "not a class"); 456 assert(oop(dims)->is_typeArray(), "not an array"); 457 458 ResourceMark rm; 459 jint len = dims->length(); 460 assert(len > 0, "Dimensions array should contain data"); 461 jint *c_dims = NEW_RESOURCE_ARRAY(jint, len); 462 ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0), 463 c_dims, len); 464 465 Handle holder(current, elem_type->klass_holder()); // keep the klass alive 466 oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD); 467 deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION); 468 current->set_vm_result(obj); 469 JRT_END 470 471 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notify_C, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current)) 472 473 // Very few notify/notifyAll operations find any threads on the waitset, so 474 // the dominant fast-path is to simply return. 475 // Relatedly, it's critical that notify/notifyAll be fast in order to 476 // reduce lock hold times. 477 if (!SafepointSynchronize::is_synchronizing()) { 478 if (ObjectSynchronizer::quick_notify(obj, current, false)) { 479 return; 480 } 481 } 482 483 // This is the case the fast-path above isn't provisioned to handle. 484 // The fast-path is designed to handle frequently arising cases in an efficient manner. 485 // (The fast-path is just a degenerate variant of the slow-path). 486 // Perform the dreaded state transition and pass control into the slow-path. 487 JRT_BLOCK; 488 Handle h_obj(current, obj); 489 ObjectSynchronizer::notify(h_obj, CHECK); 490 JRT_BLOCK_END; 491 JRT_END 492 493 JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notifyAll_C, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current)) 494 495 if (!SafepointSynchronize::is_synchronizing() ) { 496 if (ObjectSynchronizer::quick_notify(obj, current, true)) { 497 return; 498 } 499 } 500 501 // This is the case the fast-path above isn't provisioned to handle. 502 // The fast-path is designed to handle frequently arising cases in an efficient manner. 503 // (The fast-path is just a degenerate variant of the slow-path). 

const TypeFunc *OptoRuntime::new_instance_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JVMTI
const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // VirtualThread oop
  fields[TypeFunc::Parms+1] = TypeInt::BOOL;         // jboolean
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
#endif

const TypeFunc *OptoRuntime::athrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


const TypeFunc *OptoRuntime::new_array_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;          // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::new_array_nozero_Type() {
  return new_array_Type();
}

const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type **fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  for (int i = 1; i < nargs; i++) {
    fields[TypeFunc::Parms + i] = TypeInt::INT;      // array size
  }
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::multianewarray2_Type() {
  return multianewarray_Type(2);
}

const TypeFunc *OptoRuntime::multianewarray3_Type() {
  return multianewarray_Type(3);
}

const TypeFunc *OptoRuntime::multianewarray4_Type() {
  return multianewarray_Type(4);
}

const TypeFunc *OptoRuntime::multianewarray5_Type() {
  return multianewarray_Type(5);
}

const TypeFunc *OptoRuntime::multianewarrayN_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // array of dim sizes
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::uncommon_trap_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling
const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
  return complete_monitor_enter_Type();
}

//-----------------------------------------------------------------------------
const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(3);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be unlocked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock - BasicLock
  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM;    // Thread pointer (Self)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::monitor_notify_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be notified
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::monitor_notifyAll_Type() {
  return monitor_notify_Type();
}

const TypeFunc* OptoRuntime::flush_windows_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
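
// Note on long/double arguments in the signatures below: C2 models a jlong or
// jdouble value as two adjacent tuple slots, the value type followed by
// Type::HALF for the second (dummy) half.  l2f_Type below is a minimal example
// of the shape:
//
//   fields[TypeFunc::Parms+0] = TypeLong::LONG;  // the jlong value
//   fields[TypeFunc::Parms+1] = Type::HALF;      // placeholder for the other half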

const TypeFunc* OptoRuntime::l2f_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::modf_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  fields[TypeFunc::Parms+1] = Type::FLOAT;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::Math_D_D_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(num_arg);
  assert(num_arg > 0, "must have at least 1 input");
  for (uint i = 0; i < num_arg; i++) {
    fields[TypeFunc::Parms+i] = in_type;
  }
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields);

  // create result type (range)
  const uint num_ret = 1;
  fields = TypeTuple::fields(num_ret);
  fields[TypeFunc::Parms+0] = out_type;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::Math_DD_D_Type() {
  const Type **fields = TypeTuple::fields(4);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  fields[TypeFunc::Parms+2] = Type::DOUBLE;
  fields[TypeFunc::Parms+3] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

//-------------- currentTimeMillis, currentTimeNanos, etc

const TypeFunc* OptoRuntime::void_long_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::void_void_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::jfr_write_checkpoint_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}


// Takes as parameters:
//    void *dest
//    long size
//    uchar byte
const TypeFunc* OptoRuntime::make_setmemory_Type() {
  // create input type (domain)
  int argcnt = NOT_LP64(3) LP64_ONLY(4);
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;       // dest
  fields[argp++] = TypeX_X;                // size
  LP64_ONLY(fields[argp++] = Type::HALF);  // size
  fields[argp++] = TypeInt::UBYTE;         // bytevalue
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// arraycopy stub variations:
enum ArrayCopyType {
  ac_fast,                      // void(ptr, ptr, size_t)
  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
  ac_slow,                      // void(ptr, int, ptr, int, int)
  ac_generic                    //  int(ptr, int, ptr, int, int)
};

static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args      = (act == ac_fast ? 3 : 5);
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL;    // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // dest_pos
    fields[argp++] = TypeInt::INT;      // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL;  // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = nullptr; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::fast_arraycopy_Type() {
  // This signature is simple:  Two base pointers and a size_t.
  return make_arraycopy_Type(ac_fast);
}

const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() {
  // An extension of fast_arraycopy_Type which adds type checking.
  return make_arraycopy_Type(ac_checkcast);
}

const TypeFunc* OptoRuntime::slow_arraycopy_Type() {
  // This signature is exactly the same as System.arraycopy.
  // There are no intptr_t (int/long) arguments.
  return make_arraycopy_Type(ac_slow);
}

const TypeFunc* OptoRuntime::generic_arraycopy_Type() {
  // This signature is like System.arraycopy, except that it returns status.
  return make_arraycopy_Type(ac_generic);
}


const TypeFunc* OptoRuntime::array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  const TypeTuple *domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::array_partition_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // array
  fields[argp++] = TypeInt::INT;      // element type
  fields[argp++] = TypeInt::INT;      // low
  fields[argp++] = TypeInt::INT;      // end
  fields[argp++] = TypePtr::NOTNULL;  // pivot_indices (int array)
  fields[argp++] = TypeInt::INT;      // indexPivot1
  fields[argp++] = TypeInt::INT;      // indexPivot2
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::array_sort_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // array
  fields[argp++] = TypeInt::INT;      // element type
  fields[argp++] = TypeInt::INT;      // fromIndex
  fields[argp++] = TypeInt::INT;      // toIndex
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant)
const TypeFunc* OptoRuntime::aescrypt_block_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesCRC32(int crc, byte* b, int len)
 */
const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypeInt::INT;      // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table)
 */
const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // crc
  fields[argp++] = TypePtr::NOTNULL;  // buf
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypePtr::NOTNULL;  // table
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesAdler32(int adler, bytes* b, int off, int len)
 */
const TypeFunc* OptoRuntime::updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;      // adler
  fields[argp++] = TypePtr::NOTNULL;  // src + offset
  fields[argp++] = TypeInt::INT;      // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // adler result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypePtr::NOTNULL;  // r array
  fields[argp++] = TypeInt::INT;      // src len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// for electronicCodeBook calls of aescrypt encrypt/decrypt, three pointers and a length, returning int
const TypeFunc* OptoRuntime::electronicCodeBook_aescrypt_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypeInt::INT;      // src len
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// for counterMode calls of aescrypt encrypt/decrypt, six pointers and a length, returning int
const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src
  fields[argp++] = TypePtr::NOTNULL;  // dest
  fields[argp++] = TypePtr::NOTNULL;  // k array
  fields[argp++] = TypePtr::NOTNULL;  // counter array
  fields[argp++] = TypeInt::INT;      // src len
  fields[argp++] = TypePtr::NOTNULL;  // saved_encCounter
  fields[argp++] = TypePtr::NOTNULL;  // saved used addr
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// for galoisCounterMode calls of aescrypt encrypt/decrypt, seven pointers and a length, returning int
const TypeFunc* OptoRuntime::galoisCounterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 8;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // byte[] in + inOfs
  fields[argp++] = TypeInt::INT;      // int len
  fields[argp++] = TypePtr::NOTNULL;  // byte[] ct + ctOfs
  fields[argp++] = TypePtr::NOTNULL;  // byte[] out + outOfs
  fields[argp++] = TypePtr::NOTNULL;  // byte[] key from AESCrypt obj
  fields[argp++] = TypePtr::NOTNULL;  // long[] state from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;  // long[] subkeyHtbl from GHASH obj
  fields[argp++] = TypePtr::NOTNULL;  // byte[] counter from GCTR obj

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

/*
 * void implCompress(byte[] buf, int ofs)
 */
const TypeFunc* OptoRuntime::digestBase_implCompress_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 3 : 2;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;             // buf
  fields[argp++] = TypePtr::NOTNULL;             // state
  if (is_sha3) fields[argp++] = TypeInt::INT;    // block_size
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/*
 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
 */
const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 5 : 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;             // buf
  fields[argp++] = TypePtr::NOTNULL;             // state
  if (is_sha3) fields[argp++] = TypeInt::INT;    // block_size
  fields[argp++] = TypeInt::INT;                 // ofs
  fields[argp++] = TypeInt::INT;                 // limit
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning ofs (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::multiplyToLen_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // x
  fields[argp++] = TypeInt::INT;      // xlen
  fields[argp++] = TypePtr::NOTNULL;  // y
  fields[argp++] = TypeInt::INT;      // ylen
  fields[argp++] = TypePtr::NOTNULL;  // z
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // x
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypePtr::NOTNULL;  // z
  fields[argp++] = TypeInt::INT;      // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// for mulAdd calls, 2 pointers and 3 ints, returning int
const TypeFunc* OptoRuntime::mulAdd_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // out
  fields[argp++] = TypePtr::NOTNULL;  // in
  fields[argp++] = TypeInt::INT;      // offset
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypeInt::INT;      // k
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning carry (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // a
  fields[argp++] = TypePtr::NOTNULL;  // b
  fields[argp++] = TypePtr::NOTNULL;  // n
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypeLong::LONG;    // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;  // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // a
  fields[argp++] = TypePtr::NOTNULL;  // n
  fields[argp++] = TypeInt::INT;      // len
  fields[argp++] = TypeLong::LONG;    // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;  // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::bigIntegerShift_Type() {
  int argcnt = 5;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // newArr
  fields[argp++] = TypePtr::NOTNULL;  // oldArr
  fields[argp++] = TypeInt::INT;      // newIdx
  fields[argp++] = TypeInt::INT;      // shiftCount
  fields[argp++] = TypeInt::INT;      // numIter
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::vectorizedMismatch_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // obja
  fields[argp++] = TypePtr::NOTNULL;  // objb
  fields[argp++] = TypeInt::INT;      // length, number of elements
  fields[argp++] = TypeInt::INT;      // log2scale, element size
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // return mismatch index (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// GHASH block processing
const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // state
  fields[argp++] = TypePtr::NOTNULL;  // subkeyH
  fields[argp++] = TypePtr::NOTNULL;  // data
  fields[argp++] = TypeInt::INT;      // blocks
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// ChaCha20 Block function
const TypeFunc* OptoRuntime::chacha20Block_Type() {
  int argcnt = 2;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // state
  fields[argp++] = TypePtr::NOTNULL;  // result

  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Base64 encode function
const TypeFunc* OptoRuntime::base64_encodeBlock_Type() {
  int argcnt = 6;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // src array
  fields[argp++] = TypeInt::INT;      // offset
  fields[argp++] = TypeInt::INT;      // length
  fields[argp++] = TypePtr::NOTNULL;  // dest array
  fields[argp++] = TypeInt::INT;      // dp
  fields[argp++] = TypeInt::BOOL;     // isURL
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// String IndexOf function
const TypeFunc* OptoRuntime::string_IndexOf_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;  // haystack array
  fields[argp++] = TypeInt::INT;      // haystack length
  fields[argp++] = TypePtr::NOTNULL;  // needle array
  fields[argp++] = TypeInt::INT;      // needle length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
// String IndexOf function
const TypeFunc* OptoRuntime::string_IndexOf_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // haystack array
  fields[argp++] = TypeInt::INT;        // haystack length
  fields[argp++] = TypePtr::NOTNULL;    // needle array
  fields[argp++] = TypeInt::INT;        // needle length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // Index of needle in haystack
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Base64 decode function
const TypeFunc* OptoRuntime::base64_decodeBlock_Type() {
  int argcnt = 7;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // src offset
  fields[argp++] = TypeInt::INT;        // src length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dest offset
  fields[argp++] = TypeInt::BOOL;       // isURL
  fields[argp++] = TypeInt::BOOL;       // isMIME
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// Poly1305 processMultipleBlocks function
const TypeFunc* OptoRuntime::poly1305_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // input array
  fields[argp++] = TypeInt::INT;        // input length
  fields[argp++] = TypePtr::NOTNULL;    // accumulator array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// MontgomeryIntegerPolynomialP256 multiply function
const TypeFunc* OptoRuntime::intpoly_montgomeryMult_P256_Type() {
  int argcnt = 3;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a array
  fields[argp++] = TypePtr::NOTNULL;    // b array
  fields[argp++] = TypePtr::NOTNULL;    // r(esult) array
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

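// Return-value convention used by the factories above: a stub that returns a
// value fills the single range slot and sizes the range tuple as
// TypeFunc::Parms + 1, while a void stub leaves the slot as nullptr and sizes
// the range as TypeFunc::Parms, i.e. zero return fields. In short:
//
//   fields[TypeFunc::Parms + 0] = TypeInt::INT;   // int-returning stub
//   range = TypeTuple::make(TypeFunc::Parms + 1, fields);
//
//   fields[TypeFunc::Parms + 0] = nullptr;        // void stub
//   range = TypeTuple::make(TypeFunc::Parms, fields);
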
// IntegerPolynomial constant time assignment function
const TypeFunc* OptoRuntime::intpoly_assign_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // set flag
  fields[argp++] = TypePtr::NOTNULL;    // a array (result)
  fields[argp++] = TypePtr::NOTNULL;    // b array (if set is set)
  fields[argp++] = TypeInt::INT;        // array length
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//------------- Interpreter state access for on stack replacement
const TypeFunc* OptoRuntime::osr_end_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type
  fields = TypeTuple::fields(1);
  // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//-------------------------------------------------------------------------------------
// register policy

bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
  assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
  switch (register_save_policy[reg]) {
    case 'C': return false; // SOC
    case 'E': return true;  // SOE
    case 'N': return false; // NS
    case 'A': return false; // AS
  }
  ShouldNotReachHere();
  return false;
}

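// register_save_policy[] comes from the ADLC-generated ad_<cpu> files; the
// single-character codes stand for save-on-call ('C', caller-saved),
// save-on-entry ('E', callee-saved), no-save ('N') and always-save ('A').
// Only SOE registers report true above. Illustrative (hypothetical) use at a
// call site:
//
//   if (OptoRuntime::is_callee_saved_register(reg)) {
//     // value in 'reg' survives the call; no spill needed
//   } else {
//     // caller-saved; the value must be preserved around the call
//   }
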
//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// This method is an entry point that is always called from another C++ method,
// never directly from compiled code. Compiled code enters through the C++
// wrapper that follows (handle_exception_C).
// We cannot allow an async exception to be installed while we are processing
// an exception.
JRT_ENTRY_NO_ASYNC_PROF(address, OptoRuntime, handle_exception_C_helper, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling.  DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs check this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters;
  // exceptions from compiled Java methods are handled in compiled code
  // using a rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to the old paradigm: search for the exception handler in
    // caller_frame instead of in the exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages.  If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert(handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and no other exception occurred during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception, pc, handler_address);
        }
      } else {
#ifdef ASSERT
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore the exception oop that was cleared above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob.
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top-frame method.
// Note we enter without the usual JRT wrapper: we call a helper routine that
// does the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been,
// we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* current) {
  //
  // We are in Java not VM and in debug mode we have a NoHandleMark
  //
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  debug_only(NoHandleMark __hm;)
  nmethod* nm = nullptr;
  address handler_address = nullptr;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(current, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now-deoptimized frame

  if (nm != nullptr) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::skip,
                    RegisterMap::WalkContinuation::skip);
    frame caller = current->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}

//------------------------------rethrow----------------------------------------
// We get here after compiled code has executed a 'RethrowNode'. The callee
// is either throwing or rethrowing an exception. The callee-save registers
// have been restored, synchronized objects have been unlocked and the callee
// stack frame has been removed. The return address was passed in.
// Exception oop is passed as the 1st argument. This routine is then called
// from the stub. On exit, we know where to jump in the caller's code.
// After this C code exits, the stub will pop its frame and end in a jump
// (instead of a return). We enter the caller's default handler.
//
// This must be JRT_LEAF:
//     - caller will not change its state as we cannot block on exit,
//       therefore raw_exception_handler_for_return_address is all it takes
//       to handle deoptimized blobs
//
// However, there needs to be a safepoint check in the middle!  So compiled
// safepoints are completely watertight.
//
// Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
//
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
//
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
  // ret_pc will have been loaded from the stack, so for AArch64 will be signed.
  AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc));

#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++;               // count rethrows
#endif
  assert(exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  thread->set_vm_result(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}


const TypeFunc *OptoRuntime::rethrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

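// Note: rethrow_Type() above only describes the exception oop that compiled
// code materializes for the RethrowNode; the JavaThread* and the return pc
// that rethrow_C() receives are supplied by the rethrow stub itself (see the
// comment before rethrow_C) and are not part of this signature.
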
void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
  // Deoptimize the caller before continuing, as the compiled
  // exception handler table may not be valid.
  if (!StressCompiledExceptionHandlers && doit) {
    deoptimize_caller_frame(thread);
  }
}

void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
}


bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  return caller_frame.is_deoptimized_frame();
}


const TypeFunc *OptoRuntime::register_finalizer_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // oop;          Receiver
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::class_init_barrier_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeKlassPtr::NOTNULL;
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

#if INCLUDE_JFR
const TypeFunc *OptoRuntime::class_id_load_barrier_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields);

  return TypeFunc::make(domain, range);
}
#endif

//-----------------------------------------------------------------------------
// Dtrace support.  entry and exit probes have the same signature
const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;       // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM;  // Method*;    Method we are entering
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;     // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // oop;    newly allocated object

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, register_finalizer_C, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, class_init_barrier_C, OptoRuntime::class_init_barrier_C(Klass* k, JavaThread* current))
  InstanceKlass* ik = InstanceKlass::cast(k);
  if (ik->should_be_initialized()) {
    ik->initialize(CHECK);
  } else if (UsePerfData) {
    _perf_OptoRuntime_class_init_barrier_redundant_count->inc();
  }
JRT_END

//-----------------------------------------------------------------------------

NamedCounter * volatile OptoRuntime::_named_counters = nullptr;

//
// dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}

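// With -XX:+Verbose the per-site lines precede the totals; the output of
// print_named_counters() looks roughly like this (counts, names and bcis are
// illustrative only):
//
//   12 Foo.bar@14 Baz.run@3 (eliminated)
//   57 Foo.baz@42
//   dynamic locks: 69
//   eliminated locks: 12 (17%)
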
//
//  Allocate a new NamedCounter.  The JVMState is used to generate the
//  name, which consists of method@bci entries for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    if (bci < 0) bci = 0;
    if (m != nullptr) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c = new NamedCounter(st.freeze(), tag);

  // atomically add the new counter to the head of the list.  We only
  // add counters so this is safe.
  NamedCounter* head;
  do {
    c->set_next(nullptr);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}

int trace_exception_counter = 0;
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_nmethod()) {
    blob->as_nmethod()->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.freeze());
}

#define DO_COUNTERS2(macro2, macro1) \
  macro2(OptoRuntime, new_instance_C) \
  macro2(OptoRuntime, new_array_C) \
  macro2(OptoRuntime, new_array_nozero_C) \
  macro2(OptoRuntime, multianewarray2_C) \
  macro2(OptoRuntime, multianewarray3_C) \
  macro2(OptoRuntime, multianewarray4_C) \
  macro2(OptoRuntime, multianewarrayN_C) \
  macro2(OptoRuntime, monitor_notify_C) \
  macro2(OptoRuntime, monitor_notifyAll_C) \
  macro2(OptoRuntime, handle_exception_C_helper) \
  macro2(OptoRuntime, register_finalizer_C) \
  macro2(OptoRuntime, class_init_barrier_C) \
  macro1(OptoRuntime, class_init_barrier_redundant)

#define INIT_COUNTER_TIME_AND_CNT(sub, name) \
  NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

#define INIT_COUNTER_CNT(sub, name) \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

void OptoRuntime::init_counters() {
  assert(CompilerConfig::is_c2_enabled(), "");

  if (UsePerfData) {
    EXCEPTION_MARK;

    DO_COUNTERS2(INIT_COUNTER_TIME_AND_CNT, INIT_COUNTER_CNT)

    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("jvm_perf_init failed unexpectedly");
    }
  }
}
#undef INIT_COUNTER_TIME_AND_CNT
#undef INIT_COUNTER_CNT

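// DO_COUNTERS2 is an X-macro list: each entry is expanded with whichever
// macro pair the call site supplies. For example, in init_counters() the
// entry macro2(OptoRuntime, new_instance_C) expands (roughly) to
//
//   NEWPERFTICKCOUNTERS(_perf_OptoRuntime_new_instance_C_timer, SUN_CI,
//                       "OptoRuntime::new_instance_C");
//   NEWPERFEVENTCOUNTER(_perf_OptoRuntime_new_instance_C_count, SUN_CI,
//                       "OptoRuntime::new_instance_C_count");
//
// The same list is reused by print_counters_on() with the PRINT_* macros below.
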
#define PRINT_COUNTER_TIME_AND_CNT(sub, name) { \
  jlong count = _perf_##sub##_##name##_count->get_value(); \
  if (count > 0) { \
    st->print_cr("  %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
                 _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
                 _perf_##sub##_##name##_timer->thread_counter_value_us(), \
                 count); \
  }}

#define PRINT_COUNTER_CNT(sub, name) { \
  jlong count = _perf_##sub##_##name##_count->get_value(); \
  if (count > 0) { \
    st->print_cr("  %-30s = " JLONG_FORMAT_W(5) " events", #name, count); \
  }}

void OptoRuntime::print_counters_on(outputStream* st) {
  if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c2_enabled()) {
    DO_COUNTERS2(PRINT_COUNTER_TIME_AND_CNT, PRINT_COUNTER_CNT)
  } else {
    st->print_cr("  OptoRuntime: no info (%s is disabled)",
                 (!CompilerConfig::is_c2_enabled() ? "C2" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
  }
}

#undef PRINT_COUNTER_TIME_AND_CNT
#undef PRINT_COUNTER_CNT
#undef DO_COUNTERS2