1 /* 2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmClasses.hpp" 27 #include "classfile/vmSymbols.hpp" 28 #include "code/codeCache.hpp" 29 #include "code/compiledIC.hpp" 30 #include "code/nmethod.hpp" 31 #include "code/pcDesc.hpp" 32 #include "code/scopeDesc.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/compilerDefinitions.inline.hpp" 36 #include "compiler/oopMap.hpp" 37 #include "gc/g1/g1HeapRegion.hpp" 38 #include "gc/shared/barrierSet.hpp" 39 #include "gc/shared/collectedHeap.hpp" 40 #include "gc/shared/gcLocker.hpp" 41 #include "interpreter/bytecode.hpp" 42 #include "interpreter/interpreter.hpp" 43 #include "interpreter/linkResolver.hpp" 44 #include "logging/log.hpp" 45 #include "logging/logStream.hpp" 46 #include "memory/oopFactory.hpp" 47 #include "memory/resourceArea.hpp" 48 #include "oops/objArrayKlass.hpp" 49 #include "oops/klass.inline.hpp" 50 #include "oops/oop.inline.hpp" 51 #include "oops/typeArrayOop.inline.hpp" 52 #include "opto/ad.hpp" 53 #include "opto/addnode.hpp" 54 #include "opto/callnode.hpp" 55 #include "opto/cfgnode.hpp" 56 #include "opto/graphKit.hpp" 57 #include "opto/machnode.hpp" 58 #include "opto/matcher.hpp" 59 #include "opto/memnode.hpp" 60 #include "opto/mulnode.hpp" 61 #include "opto/output.hpp" 62 #include "opto/runtime.hpp" 63 #include "opto/subnode.hpp" 64 #include "prims/jvmtiExport.hpp" 65 #include "runtime/atomic.hpp" 66 #include "runtime/frame.inline.hpp" 67 #include "runtime/handles.inline.hpp" 68 #include "runtime/interfaceSupport.inline.hpp" 69 #include "runtime/java.hpp" 70 #include "runtime/javaCalls.hpp" 71 #include "runtime/perfData.inline.hpp" 72 #include "runtime/sharedRuntime.hpp" 73 #include "runtime/signature.hpp" 74 #include "runtime/stackWatermarkSet.hpp" 75 #include "runtime/synchronizer.hpp" 76 #include "runtime/threadCritical.hpp" 77 #include "runtime/threadWXSetters.inline.hpp" 78 #include "runtime/vframe.hpp" 79 #include 
"runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
// To force FullGCALot inside a runtime function, add the following two lines
//
// Universe::release_fullgc_alot_dummy();
// Universe::heap()->collect();
//
// At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000


// Compiled code entry points.
// Each address is the entry of a RuntimeStub produced by OptoRuntime::generate()
// below; nullptr until that generation has run.
address OptoRuntime::_new_instance_Java                           = nullptr;
address OptoRuntime::_new_array_Java                              = nullptr;
address OptoRuntime::_new_array_nozero_Java                       = nullptr;
address OptoRuntime::_multianewarray2_Java                        = nullptr;
address OptoRuntime::_multianewarray3_Java                        = nullptr;
address OptoRuntime::_multianewarray4_Java                        = nullptr;
address OptoRuntime::_multianewarray5_Java                        = nullptr;
address OptoRuntime::_multianewarrayN_Java                        = nullptr;
address OptoRuntime::_vtable_must_compile_Java                    = nullptr;
address OptoRuntime::_complete_monitor_locking_Java               = nullptr;
address OptoRuntime::_monitor_notify_Java                         = nullptr;
address OptoRuntime::_monitor_notifyAll_Java                      = nullptr;
address OptoRuntime::_rethrow_Java                                = nullptr;

address OptoRuntime::_slow_arraycopy_Java                         = nullptr;
address OptoRuntime::_register_finalizer_Java                     = nullptr;
address OptoRuntime::_class_init_barrier_Java                     = nullptr;
#if INCLUDE_JVMTI
address OptoRuntime::_notify_jvmti_vthread_start                  = nullptr;
address OptoRuntime::_notify_jvmti_vthread_end                    = nullptr;
address OptoRuntime::_notify_jvmti_vthread_mount                  = nullptr;
address OptoRuntime::_notify_jvmti_vthread_unmount                = nullptr;
#endif

// Blob used to dispatch exceptions thrown out of compiled code; built by
// generate_exception_blob() in OptoRuntime::generate().
ExceptionBlob* OptoRuntime::_exception_blob;

// NOTE(review): name suggests this counts class-init barrier calls that found
// the class already initialized; the barrier itself is defined elsewhere — confirm.
PerfCounter* _perf_OptoRuntime_class_init_barrier_redundant_count = nullptr;

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered
// from compiled code (all of them)
#ifdef ASSERT
// Debug-only sanity check: the top frame must be the runtime stub we entered
// through, and its caller must be a compiled frame. Always returns true so it
// can be used directly inside assert().
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled like code");
  return true;
}
#endif // ASSERT


// Generate one runtime stub, store its entry point in 'var', and make the
// enclosing generate() return false if stub compilation failed.
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
  if (var == nullptr) { return false; }

// Generate all the compiled-code entry-point stubs declared above.
// Returns false as soon as any single stub fails to generate.
bool OptoRuntime::generate(ciEnv* env) {
  init_counters();

  generate_exception_blob();

  // Note: tls: Means fetching the return oop out of the thread-local storage
  //
  //    variable/name                      type-function-gen             , runtime method                  ,fncy_jp, tls,retpc
  // -------------------------------------------------------------------------------------------------------------------------------
  gen(env, _new_instance_Java              , new_instance_Type           , new_instance_C                  ,    0 , true, false);
  gen(env, _new_array_Java                 , new_array_Type              , new_array_C                     ,    0 , true, false);
  gen(env, _new_array_nozero_Java          , new_array_Type              , new_array_nozero_C              ,    0 , true, false);
  gen(env, _multianewarray2_Java           , multianewarray2_Type        , multianewarray2_C               ,    0 , true, false);
  gen(env, _multianewarray3_Java           , multianewarray3_Type        , multianewarray3_C               ,    0 , true, false);
  gen(env, _multianewarray4_Java           , multianewarray4_Type        , multianewarray4_C               ,    0 , true, false);
  gen(env, _multianewarray5_Java           , multianewarray5_Type        , multianewarray5_C               ,    0 , true, false);
  gen(env, _multianewarrayN_Java           , multianewarrayN_Type        , multianewarrayN_C               ,    0 , true, false);
#if INCLUDE_JVMTI
  gen(env, _notify_jvmti_vthread_start     , notify_jvmti_vthread_Type   , SharedRuntime::notify_jvmti_vthread_start, 0, true, false);
  gen(env, _notify_jvmti_vthread_end       , notify_jvmti_vthread_Type   , SharedRuntime::notify_jvmti_vthread_end,   0, true, false);
  gen(env, _notify_jvmti_vthread_mount     , notify_jvmti_vthread_Type   , SharedRuntime::notify_jvmti_vthread_mount, 0, true, false);
  gen(env, _notify_jvmti_vthread_unmount   , notify_jvmti_vthread_Type   , SharedRuntime::notify_jvmti_vthread_unmount, 0, true, false);
#endif
  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false);
  gen(env, _monitor_notify_Java            , monitor_notify_Type         , monitor_notify_C                ,    0 , false, false);
  gen(env, _monitor_notifyAll_Java         , monitor_notify_Type         , monitor_notifyAll_C             ,    0 , false, false);
  gen(env, _rethrow_Java                   , rethrow_Type                , rethrow_C                       ,    2 , true , true );

  gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type         , SharedRuntime::slow_arraycopy_C ,    0 , false, false);
  gen(env, _register_finalizer_Java        , register_finalizer_Type     , register_finalizer              ,    0 , false, false);
  gen(env, _class_init_barrier_Java        , class_init_barrier_Type     , class_init_barrier              ,    0 , false, false);

  return true;
}

#undef gen


// Helper method to do generation of RunTimeStub's
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen, address C_function,
                                   const char *name, int is_fancy_jump,
                                   bool pass_tls,
                                   bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompilerThread::current()->compiler());
  ResourceMark rm;
  // Compile the stub via a throw-away Compile object; only the entry point survives.
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

// Look up the human-readable name of the runtime stub containing 'entry'
// (debug builds only; product builds return a fixed string).
const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs =(RuntimeStub *)cb;
  assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_instance_C, OptoRuntime::new_instance_C(Klass* klass, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(current, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    current->set_vm_result(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END


// array allocation
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_C, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    // Although the oopFactory likes to work with the elem_type,
    // the compiler prefers the array_type, since it must already have
    // that latter value in hand for the fast path.
    Handle holder(current, array_type->klass_holder()); // keep the array klass alive
    Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
    result = oopFactory::new_objArray(elem_type, len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, new_array_nozero_C, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread* current))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(current), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(current);

  // Re-fetch the result oop from TLS: a GC inside the block above may have
  // moved the object, invalidating the local 'result' declared in the block scope.
  oop result = current->vm_result();
  if ((len > 0) && (result != nullptr) &&
      is_deoptimized_caller_frame(current)) {
    // Zero array here if the caller is deoptimized.
    // NOTE(review): presumably a deoptimized caller re-executes in a context
    // that expects a fully zeroed array, which the nozero path skipped — confirm.
    const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    size_t hs_bytes = arrayOopDesc::base_offset_in_bytes(elem_type);
    assert(is_aligned(hs_bytes, BytesPerInt), "must be 4 byte aligned");
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    // If the header ends on a 4-byte (not 8-byte) boundary, zero the first
    // 4 bytes of payload individually so the bulk fill below stays word-aligned.
    if (!is_aligned(hs_bytes, BytesPerLong)) {
      *reinterpret_cast<jint*>(reinterpret_cast<char*>(obj) + hs_bytes) = 0;
      hs_bytes += BytesPerInt;
    }

    // Optimized zeroing.
    assert(is_aligned(hs_bytes, BytesPerLong), "must be 8-byte aligned");
    const size_t aligned_hs = hs_bytes / BytesPerLong;
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.

// multianewarray for 2 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray2_C, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 1 dimension
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray3_C, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 1 dimension
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY_PROF(void, OptoRuntime, multianewarray4_C, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 1 dimension
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread* current))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;                // multianewarray for 1 dimension
#endif
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

// multianewarray for an arbitrary number of dimensions, passed as a
// Java int[] of dimension sizes.
JRT_ENTRY_PROF(void, OptoRuntime, multianewarrayN_C, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread* current))
  assert(check_compiled_frame(current), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  // Copy the dimensions out of the Java heap array into a native buffer:
  // multi_allocate can GC, which could move the heap-resident 'dims' array.
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(current, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(current, HAS_PENDING_EXCEPTION);
  current->set_vm_result(obj);
JRT_END

JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notify_C, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread* current))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, current, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

// Same shape as monitor_notify_C above, but notifies all waiters
// (quick_notify called with all=true, slow path uses notifyall).
JRT_BLOCK_ENTRY_PROF(void, OptoRuntime, monitor_notifyAll_C, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread* current))

  if (!SafepointSynchronize::is_synchronizing() ) {
    if (ObjectSynchronizer::quick_notify(obj, current, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(current, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

// Call signature for new_instance_C: (Klass*) -> raw oop.
const TypeFunc *OptoRuntime::new_instance_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

#if INCLUDE_JVMTI
// Call signature shared by the four notify_jvmti_vthread_* stubs:
// (VirtualThread oop, jboolean) -> void.
const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // VirtualThread oop
  fields[TypeFunc::Parms+1] = TypeInt::BOOL;         // jboolean
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain,range);
}
#endif

// Signature: (exception argument) -> void.
// NOTE(review): the field comment below ("Klass to be allocated") looks
// copy-pasted from new_instance_Type; the argument here is presumably the
// object being thrown — confirm against the athrow stub's caller.
const TypeFunc *OptoRuntime::athrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


// Call signature for new_array_C / new_array_nozero_C: (Klass*, int) -> raw oop.
const TypeFunc *OptoRuntime::new_array_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;           // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

// Shared builder for the fixed-arity multianewarray signatures:
// (Klass*, int len1, ..., int len_ndim) -> raw oop.
const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type **fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
  for( int i = 1; i < nargs; i++ )
    fields[TypeFunc::Parms + i] = TypeInt::INT;       // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::multianewarray2_Type() {
  return multianewarray_Type(2);
}

const TypeFunc *OptoRuntime::multianewarray3_Type() {
  return multianewarray_Type(3);
}

const TypeFunc *OptoRuntime::multianewarray4_Type() {
  return multianewarray_Type(4);
}

const TypeFunc *OptoRuntime::multianewarray5_Type() {
  return multianewarray_Type(5);
}

// Call signature for multianewarrayN_C: (Klass*, int[] dims) -> raw oop.
const TypeFunc *OptoRuntime::multianewarrayN_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // array of dim sizes
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

// Signature of the uncommon-trap call: (int trap_request) -> void.
const TypeFunc *OptoRuntime::uncommon_trap_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling
// Signature of monitor enter: (object, BasicLock address) -> void.
const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 613 614 return TypeFunc::make(domain,range); 615 } 616 617 618 //----------------------------------------------------------------------------- 619 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() { 620 // create input type (domain) 621 const Type **fields = TypeTuple::fields(3); 622 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 623 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock - BasicLock 624 fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM; // Thread pointer (Self) 625 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields); 626 627 // create result type (range) 628 fields = TypeTuple::fields(0); 629 630 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 631 632 return TypeFunc::make(domain, range); 633 } 634 635 const TypeFunc *OptoRuntime::monitor_notify_Type() { 636 // create input type (domain) 637 const Type **fields = TypeTuple::fields(1); 638 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked 639 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 640 641 // create result type (range) 642 fields = TypeTuple::fields(0); 643 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 644 return TypeFunc::make(domain, range); 645 } 646 647 const TypeFunc* OptoRuntime::flush_windows_Type() { 648 // create input type (domain) 649 const Type** fields = TypeTuple::fields(1); 650 fields[TypeFunc::Parms+0] = nullptr; // void 651 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields); 652 653 // create result type 654 fields = TypeTuple::fields(1); 655 fields[TypeFunc::Parms+0] = nullptr; // void 656 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 657 658 return TypeFunc::make(domain, range); 659 } 660 661 const TypeFunc* OptoRuntime::l2f_Type() { 662 // create input type (domain) 663 const Type **fields = 
TypeTuple::fields(2); 664 fields[TypeFunc::Parms+0] = TypeLong::LONG; 665 fields[TypeFunc::Parms+1] = Type::HALF; 666 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 667 668 // create result type (range) 669 fields = TypeTuple::fields(1); 670 fields[TypeFunc::Parms+0] = Type::FLOAT; 671 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 672 673 return TypeFunc::make(domain, range); 674 } 675 676 const TypeFunc* OptoRuntime::modf_Type() { 677 const Type **fields = TypeTuple::fields(2); 678 fields[TypeFunc::Parms+0] = Type::FLOAT; 679 fields[TypeFunc::Parms+1] = Type::FLOAT; 680 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 681 682 // create result type (range) 683 fields = TypeTuple::fields(1); 684 fields[TypeFunc::Parms+0] = Type::FLOAT; 685 686 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 687 688 return TypeFunc::make(domain, range); 689 } 690 691 const TypeFunc *OptoRuntime::Math_D_D_Type() { 692 // create input type (domain) 693 const Type **fields = TypeTuple::fields(2); 694 // Symbol* name of class to be loaded 695 fields[TypeFunc::Parms+0] = Type::DOUBLE; 696 fields[TypeFunc::Parms+1] = Type::HALF; 697 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); 698 699 // create result type (range) 700 fields = TypeTuple::fields(2); 701 fields[TypeFunc::Parms+0] = Type::DOUBLE; 702 fields[TypeFunc::Parms+1] = Type::HALF; 703 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 704 705 return TypeFunc::make(domain, range); 706 } 707 708 const TypeFunc *OptoRuntime::Math_Vector_Vector_Type(uint num_arg, const TypeVect* in_type, const TypeVect* out_type) { 709 // create input type (domain) 710 const Type **fields = TypeTuple::fields(num_arg); 711 // Symbol* name of class to be loaded 712 assert(num_arg > 0, "must have at least 1 input"); 713 for (uint i = 0; i < num_arg; i++) { 714 fields[TypeFunc::Parms+i] = in_type; 715 } 716 const TypeTuple 
*domain = TypeTuple::make(TypeFunc::Parms+num_arg, fields); 717 718 // create result type (range) 719 const uint num_ret = 1; 720 fields = TypeTuple::fields(num_ret); 721 fields[TypeFunc::Parms+0] = out_type; 722 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+num_ret, fields); 723 724 return TypeFunc::make(domain, range); 725 } 726 727 const TypeFunc* OptoRuntime::Math_DD_D_Type() { 728 const Type **fields = TypeTuple::fields(4); 729 fields[TypeFunc::Parms+0] = Type::DOUBLE; 730 fields[TypeFunc::Parms+1] = Type::HALF; 731 fields[TypeFunc::Parms+2] = Type::DOUBLE; 732 fields[TypeFunc::Parms+3] = Type::HALF; 733 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields); 734 735 // create result type (range) 736 fields = TypeTuple::fields(2); 737 fields[TypeFunc::Parms+0] = Type::DOUBLE; 738 fields[TypeFunc::Parms+1] = Type::HALF; 739 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 740 741 return TypeFunc::make(domain, range); 742 } 743 744 //-------------- currentTimeMillis, currentTimeNanos, etc 745 746 const TypeFunc* OptoRuntime::void_long_Type() { 747 // create input type (domain) 748 const Type **fields = TypeTuple::fields(0); 749 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); 750 751 // create result type (range) 752 fields = TypeTuple::fields(2); 753 fields[TypeFunc::Parms+0] = TypeLong::LONG; 754 fields[TypeFunc::Parms+1] = Type::HALF; 755 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields); 756 757 return TypeFunc::make(domain, range); 758 } 759 760 const TypeFunc* OptoRuntime::void_void_Type() { 761 // create input type (domain) 762 const Type **fields = TypeTuple::fields(0); 763 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); 764 765 // create result type (range) 766 fields = TypeTuple::fields(0); 767 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 768 return TypeFunc::make(domain, range); 769 } 770 771 const TypeFunc* 
OptoRuntime::jfr_write_checkpoint_Type() { 772 // create input type (domain) 773 const Type **fields = TypeTuple::fields(0); 774 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields); 775 776 // create result type (range) 777 fields = TypeTuple::fields(0); 778 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 779 return TypeFunc::make(domain, range); 780 } 781 782 783 // Takes as parameters: 784 // void *dest 785 // long size 786 // uchar byte 787 const TypeFunc* OptoRuntime::make_setmemory_Type() { 788 // create input type (domain) 789 int argcnt = NOT_LP64(3) LP64_ONLY(4); 790 const Type** fields = TypeTuple::fields(argcnt); 791 int argp = TypeFunc::Parms; 792 fields[argp++] = TypePtr::NOTNULL; // dest 793 fields[argp++] = TypeX_X; // size 794 LP64_ONLY(fields[argp++] = Type::HALF); // size 795 fields[argp++] = TypeInt::UBYTE; // bytevalue 796 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 797 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 798 799 // no result type needed 800 fields = TypeTuple::fields(1); 801 fields[TypeFunc::Parms+0] = nullptr; // void 802 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 803 return TypeFunc::make(domain, range); 804 } 805 806 // arraycopy stub variations: 807 enum ArrayCopyType { 808 ac_fast, // void(ptr, ptr, size_t) 809 ac_checkcast, // int(ptr, ptr, size_t, size_t, ptr) 810 ac_slow, // void(ptr, int, ptr, int, int) 811 ac_generic // int(ptr, int, ptr, int, int) 812 }; 813 814 static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) { 815 // create input type (domain) 816 int num_args = (act == ac_fast ? 3 : 5); 817 int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 
2 : 0); 818 int argcnt = num_args; 819 LP64_ONLY(argcnt += num_size_args); // halfwords for lengths 820 const Type** fields = TypeTuple::fields(argcnt); 821 int argp = TypeFunc::Parms; 822 fields[argp++] = TypePtr::NOTNULL; // src 823 if (num_size_args == 0) { 824 fields[argp++] = TypeInt::INT; // src_pos 825 } 826 fields[argp++] = TypePtr::NOTNULL; // dest 827 if (num_size_args == 0) { 828 fields[argp++] = TypeInt::INT; // dest_pos 829 fields[argp++] = TypeInt::INT; // length 830 } 831 while (num_size_args-- > 0) { 832 fields[argp++] = TypeX_X; // size in whatevers (size_t) 833 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length 834 } 835 if (act == ac_checkcast) { 836 fields[argp++] = TypePtr::NOTNULL; // super_klass 837 } 838 assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act"); 839 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 840 841 // create result type if needed 842 int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0); 843 fields = TypeTuple::fields(1); 844 if (retcnt == 0) 845 fields[TypeFunc::Parms+0] = nullptr; // void 846 else 847 fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed 848 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields); 849 return TypeFunc::make(domain, range); 850 } 851 852 const TypeFunc* OptoRuntime::fast_arraycopy_Type() { 853 // This signature is simple: Two base pointers and a size_t. 854 return make_arraycopy_Type(ac_fast); 855 } 856 857 const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() { 858 // An extension of fast_arraycopy_Type which adds type checking. 859 return make_arraycopy_Type(ac_checkcast); 860 } 861 862 const TypeFunc* OptoRuntime::slow_arraycopy_Type() { 863 // This signature is exactly the same as System.arraycopy. 864 // There are no intptr_t (int/long) arguments. 
865 return make_arraycopy_Type(ac_slow); 866 } 867 868 const TypeFunc* OptoRuntime::generic_arraycopy_Type() { 869 // This signature is like System.arraycopy, except that it returns status. 870 return make_arraycopy_Type(ac_generic); 871 } 872 873 874 const TypeFunc* OptoRuntime::array_fill_Type() { 875 const Type** fields; 876 int argp = TypeFunc::Parms; 877 // create input type (domain): pointer, int, size_t 878 fields = TypeTuple::fields(3 LP64_ONLY( + 1)); 879 fields[argp++] = TypePtr::NOTNULL; 880 fields[argp++] = TypeInt::INT; 881 fields[argp++] = TypeX_X; // size in whatevers (size_t) 882 LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length 883 const TypeTuple *domain = TypeTuple::make(argp, fields); 884 885 // create result type 886 fields = TypeTuple::fields(1); 887 fields[TypeFunc::Parms+0] = nullptr; // void 888 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 889 890 return TypeFunc::make(domain, range); 891 } 892 893 const TypeFunc* OptoRuntime::array_partition_Type() { 894 // create input type (domain) 895 int num_args = 7; 896 int argcnt = num_args; 897 const Type** fields = TypeTuple::fields(argcnt); 898 int argp = TypeFunc::Parms; 899 fields[argp++] = TypePtr::NOTNULL; // array 900 fields[argp++] = TypeInt::INT; // element type 901 fields[argp++] = TypeInt::INT; // low 902 fields[argp++] = TypeInt::INT; // end 903 fields[argp++] = TypePtr::NOTNULL; // pivot_indices (int array) 904 fields[argp++] = TypeInt::INT; // indexPivot1 905 fields[argp++] = TypeInt::INT; // indexPivot2 906 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 907 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 908 909 // no result type needed 910 fields = TypeTuple::fields(1); 911 fields[TypeFunc::Parms+0] = nullptr; // void 912 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 913 return TypeFunc::make(domain, range); 914 } 915 916 const TypeFunc* OptoRuntime::array_sort_Type() { 917 // 
create input type (domain) 918 int num_args = 4; 919 int argcnt = num_args; 920 const Type** fields = TypeTuple::fields(argcnt); 921 int argp = TypeFunc::Parms; 922 fields[argp++] = TypePtr::NOTNULL; // array 923 fields[argp++] = TypeInt::INT; // element type 924 fields[argp++] = TypeInt::INT; // fromIndex 925 fields[argp++] = TypeInt::INT; // toIndex 926 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 927 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 928 929 // no result type needed 930 fields = TypeTuple::fields(1); 931 fields[TypeFunc::Parms+0] = nullptr; // void 932 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 933 return TypeFunc::make(domain, range); 934 } 935 936 // for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant) 937 const TypeFunc* OptoRuntime::aescrypt_block_Type() { 938 // create input type (domain) 939 int num_args = 3; 940 int argcnt = num_args; 941 const Type** fields = TypeTuple::fields(argcnt); 942 int argp = TypeFunc::Parms; 943 fields[argp++] = TypePtr::NOTNULL; // src 944 fields[argp++] = TypePtr::NOTNULL; // dest 945 fields[argp++] = TypePtr::NOTNULL; // k array 946 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 947 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 948 949 // no result type needed 950 fields = TypeTuple::fields(1); 951 fields[TypeFunc::Parms+0] = nullptr; // void 952 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 953 return TypeFunc::make(domain, range); 954 } 955 956 /** 957 * int updateBytesCRC32(int crc, byte* b, int len) 958 */ 959 const TypeFunc* OptoRuntime::updateBytesCRC32_Type() { 960 // create input type (domain) 961 int num_args = 3; 962 int argcnt = num_args; 963 const Type** fields = TypeTuple::fields(argcnt); 964 int argp = TypeFunc::Parms; 965 fields[argp++] = TypeInt::INT; // crc 966 fields[argp++] = TypePtr::NOTNULL; // src 967 fields[argp++] = 
TypeInt::INT; // len 968 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 969 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 970 971 // result type needed 972 fields = TypeTuple::fields(1); 973 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 974 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 975 return TypeFunc::make(domain, range); 976 } 977 978 /** 979 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table) 980 */ 981 const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() { 982 // create input type (domain) 983 int num_args = 4; 984 int argcnt = num_args; 985 const Type** fields = TypeTuple::fields(argcnt); 986 int argp = TypeFunc::Parms; 987 fields[argp++] = TypeInt::INT; // crc 988 fields[argp++] = TypePtr::NOTNULL; // buf 989 fields[argp++] = TypeInt::INT; // len 990 fields[argp++] = TypePtr::NOTNULL; // table 991 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 992 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 993 994 // result type needed 995 fields = TypeTuple::fields(1); 996 fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 997 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 998 return TypeFunc::make(domain, range); 999 } 1000 1001 /** 1002 * int updateBytesAdler32(int adler, bytes* b, int off, int len) 1003 */ 1004 const TypeFunc* OptoRuntime::updateBytesAdler32_Type() { 1005 // create input type (domain) 1006 int num_args = 3; 1007 int argcnt = num_args; 1008 const Type** fields = TypeTuple::fields(argcnt); 1009 int argp = TypeFunc::Parms; 1010 fields[argp++] = TypeInt::INT; // crc 1011 fields[argp++] = TypePtr::NOTNULL; // src + offset 1012 fields[argp++] = TypeInt::INT; // len 1013 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1014 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1015 1016 // result type needed 1017 fields = TypeTuple::fields(1); 1018 
fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 1019 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1020 return TypeFunc::make(domain, range); 1021 } 1022 1023 // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int 1024 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() { 1025 // create input type (domain) 1026 int num_args = 5; 1027 int argcnt = num_args; 1028 const Type** fields = TypeTuple::fields(argcnt); 1029 int argp = TypeFunc::Parms; 1030 fields[argp++] = TypePtr::NOTNULL; // src 1031 fields[argp++] = TypePtr::NOTNULL; // dest 1032 fields[argp++] = TypePtr::NOTNULL; // k array 1033 fields[argp++] = TypePtr::NOTNULL; // r array 1034 fields[argp++] = TypeInt::INT; // src len 1035 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1036 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1037 1038 // returning cipher len (int) 1039 fields = TypeTuple::fields(1); 1040 fields[TypeFunc::Parms+0] = TypeInt::INT; 1041 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1042 return TypeFunc::make(domain, range); 1043 } 1044 1045 // for electronicCodeBook calls of aescrypt encrypt/decrypt, three pointers and a length, returning int 1046 const TypeFunc* OptoRuntime::electronicCodeBook_aescrypt_Type() { 1047 // create input type (domain) 1048 int num_args = 4; 1049 int argcnt = num_args; 1050 const Type** fields = TypeTuple::fields(argcnt); 1051 int argp = TypeFunc::Parms; 1052 fields[argp++] = TypePtr::NOTNULL; // src 1053 fields[argp++] = TypePtr::NOTNULL; // dest 1054 fields[argp++] = TypePtr::NOTNULL; // k array 1055 fields[argp++] = TypeInt::INT; // src len 1056 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1057 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1058 1059 // returning cipher len (int) 1060 fields = TypeTuple::fields(1); 1061 fields[TypeFunc::Parms + 0] = TypeInt::INT; 
1062 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1063 return TypeFunc::make(domain, range); 1064 } 1065 1066 //for counterMode calls of aescrypt encrypt/decrypt, four pointers and a length, returning int 1067 const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() { 1068 // create input type (domain) 1069 int num_args = 7; 1070 int argcnt = num_args; 1071 const Type** fields = TypeTuple::fields(argcnt); 1072 int argp = TypeFunc::Parms; 1073 fields[argp++] = TypePtr::NOTNULL; // src 1074 fields[argp++] = TypePtr::NOTNULL; // dest 1075 fields[argp++] = TypePtr::NOTNULL; // k array 1076 fields[argp++] = TypePtr::NOTNULL; // counter array 1077 fields[argp++] = TypeInt::INT; // src len 1078 fields[argp++] = TypePtr::NOTNULL; // saved_encCounter 1079 fields[argp++] = TypePtr::NOTNULL; // saved used addr 1080 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1081 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1082 // returning cipher len (int) 1083 fields = TypeTuple::fields(1); 1084 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1085 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1086 return TypeFunc::make(domain, range); 1087 } 1088 1089 //for counterMode calls of aescrypt encrypt/decrypt, four pointers and a length, returning int 1090 const TypeFunc* OptoRuntime::galoisCounterMode_aescrypt_Type() { 1091 // create input type (domain) 1092 int num_args = 8; 1093 int argcnt = num_args; 1094 const Type** fields = TypeTuple::fields(argcnt); 1095 int argp = TypeFunc::Parms; 1096 fields[argp++] = TypePtr::NOTNULL; // byte[] in + inOfs 1097 fields[argp++] = TypeInt::INT; // int len 1098 fields[argp++] = TypePtr::NOTNULL; // byte[] ct + ctOfs 1099 fields[argp++] = TypePtr::NOTNULL; // byte[] out + outOfs 1100 fields[argp++] = TypePtr::NOTNULL; // byte[] key from AESCrypt obj 1101 fields[argp++] = TypePtr::NOTNULL; // long[] state from GHASH obj 1102 fields[argp++] = TypePtr::NOTNULL; // 
long[] subkeyHtbl from GHASH obj 1103 fields[argp++] = TypePtr::NOTNULL; // byte[] counter from GCTR obj 1104 1105 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1106 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1107 // returning cipher len (int) 1108 fields = TypeTuple::fields(1); 1109 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1110 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1111 return TypeFunc::make(domain, range); 1112 } 1113 1114 /* 1115 * void implCompress(byte[] buf, int ofs) 1116 */ 1117 const TypeFunc* OptoRuntime::digestBase_implCompress_Type(bool is_sha3) { 1118 // create input type (domain) 1119 int num_args = is_sha3 ? 3 : 2; 1120 int argcnt = num_args; 1121 const Type** fields = TypeTuple::fields(argcnt); 1122 int argp = TypeFunc::Parms; 1123 fields[argp++] = TypePtr::NOTNULL; // buf 1124 fields[argp++] = TypePtr::NOTNULL; // state 1125 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size 1126 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1127 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1128 1129 // no result type needed 1130 fields = TypeTuple::fields(1); 1131 fields[TypeFunc::Parms+0] = nullptr; // void 1132 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1133 return TypeFunc::make(domain, range); 1134 } 1135 1136 /* 1137 * int implCompressMultiBlock(byte[] b, int ofs, int limit) 1138 */ 1139 const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type(bool is_sha3) { 1140 // create input type (domain) 1141 int num_args = is_sha3 ? 
5 : 4; 1142 int argcnt = num_args; 1143 const Type** fields = TypeTuple::fields(argcnt); 1144 int argp = TypeFunc::Parms; 1145 fields[argp++] = TypePtr::NOTNULL; // buf 1146 fields[argp++] = TypePtr::NOTNULL; // state 1147 if (is_sha3) fields[argp++] = TypeInt::INT; // block_size 1148 fields[argp++] = TypeInt::INT; // ofs 1149 fields[argp++] = TypeInt::INT; // limit 1150 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1151 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1152 1153 // returning ofs (int) 1154 fields = TypeTuple::fields(1); 1155 fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs 1156 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1157 return TypeFunc::make(domain, range); 1158 } 1159 1160 const TypeFunc* OptoRuntime::multiplyToLen_Type() { 1161 // create input type (domain) 1162 int num_args = 6; 1163 int argcnt = num_args; 1164 const Type** fields = TypeTuple::fields(argcnt); 1165 int argp = TypeFunc::Parms; 1166 fields[argp++] = TypePtr::NOTNULL; // x 1167 fields[argp++] = TypeInt::INT; // xlen 1168 fields[argp++] = TypePtr::NOTNULL; // y 1169 fields[argp++] = TypeInt::INT; // ylen 1170 fields[argp++] = TypePtr::NOTNULL; // z 1171 fields[argp++] = TypeInt::INT; // zlen 1172 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1173 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1174 1175 // no result type needed 1176 fields = TypeTuple::fields(1); 1177 fields[TypeFunc::Parms+0] = nullptr; 1178 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1179 return TypeFunc::make(domain, range); 1180 } 1181 1182 const TypeFunc* OptoRuntime::squareToLen_Type() { 1183 // create input type (domain) 1184 int num_args = 4; 1185 int argcnt = num_args; 1186 const Type** fields = TypeTuple::fields(argcnt); 1187 int argp = TypeFunc::Parms; 1188 fields[argp++] = TypePtr::NOTNULL; // x 1189 fields[argp++] = TypeInt::INT; // len 1190 fields[argp++] = 
TypePtr::NOTNULL; // z 1191 fields[argp++] = TypeInt::INT; // zlen 1192 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1193 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1194 1195 // no result type needed 1196 fields = TypeTuple::fields(1); 1197 fields[TypeFunc::Parms+0] = nullptr; 1198 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1199 return TypeFunc::make(domain, range); 1200 } 1201 1202 // for mulAdd calls, 2 pointers and 3 ints, returning int 1203 const TypeFunc* OptoRuntime::mulAdd_Type() { 1204 // create input type (domain) 1205 int num_args = 5; 1206 int argcnt = num_args; 1207 const Type** fields = TypeTuple::fields(argcnt); 1208 int argp = TypeFunc::Parms; 1209 fields[argp++] = TypePtr::NOTNULL; // out 1210 fields[argp++] = TypePtr::NOTNULL; // in 1211 fields[argp++] = TypeInt::INT; // offset 1212 fields[argp++] = TypeInt::INT; // len 1213 fields[argp++] = TypeInt::INT; // k 1214 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1215 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1216 1217 // returning carry (int) 1218 fields = TypeTuple::fields(1); 1219 fields[TypeFunc::Parms+0] = TypeInt::INT; 1220 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 1221 return TypeFunc::make(domain, range); 1222 } 1223 1224 const TypeFunc* OptoRuntime::montgomeryMultiply_Type() { 1225 // create input type (domain) 1226 int num_args = 7; 1227 int argcnt = num_args; 1228 const Type** fields = TypeTuple::fields(argcnt); 1229 int argp = TypeFunc::Parms; 1230 fields[argp++] = TypePtr::NOTNULL; // a 1231 fields[argp++] = TypePtr::NOTNULL; // b 1232 fields[argp++] = TypePtr::NOTNULL; // n 1233 fields[argp++] = TypeInt::INT; // len 1234 fields[argp++] = TypeLong::LONG; // inv 1235 fields[argp++] = Type::HALF; 1236 fields[argp++] = TypePtr::NOTNULL; // result 1237 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1238 const TypeTuple* domain = 
TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1239 1240 // result type needed 1241 fields = TypeTuple::fields(1); 1242 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1243 1244 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1245 return TypeFunc::make(domain, range); 1246 } 1247 1248 const TypeFunc* OptoRuntime::montgomerySquare_Type() { 1249 // create input type (domain) 1250 int num_args = 6; 1251 int argcnt = num_args; 1252 const Type** fields = TypeTuple::fields(argcnt); 1253 int argp = TypeFunc::Parms; 1254 fields[argp++] = TypePtr::NOTNULL; // a 1255 fields[argp++] = TypePtr::NOTNULL; // n 1256 fields[argp++] = TypeInt::INT; // len 1257 fields[argp++] = TypeLong::LONG; // inv 1258 fields[argp++] = Type::HALF; 1259 fields[argp++] = TypePtr::NOTNULL; // result 1260 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1261 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1262 1263 // result type needed 1264 fields = TypeTuple::fields(1); 1265 fields[TypeFunc::Parms+0] = TypePtr::NOTNULL; 1266 1267 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1268 return TypeFunc::make(domain, range); 1269 } 1270 1271 const TypeFunc * OptoRuntime::bigIntegerShift_Type() { 1272 int argcnt = 5; 1273 const Type** fields = TypeTuple::fields(argcnt); 1274 int argp = TypeFunc::Parms; 1275 fields[argp++] = TypePtr::NOTNULL; // newArr 1276 fields[argp++] = TypePtr::NOTNULL; // oldArr 1277 fields[argp++] = TypeInt::INT; // newIdx 1278 fields[argp++] = TypeInt::INT; // shiftCount 1279 fields[argp++] = TypeInt::INT; // numIter 1280 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1281 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1282 1283 // no result type needed 1284 fields = TypeTuple::fields(1); 1285 fields[TypeFunc::Parms + 0] = nullptr; 1286 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1287 return TypeFunc::make(domain, range); 1288 } 1289 1290 
const TypeFunc* OptoRuntime::vectorizedMismatch_Type() { 1291 // create input type (domain) 1292 int num_args = 4; 1293 int argcnt = num_args; 1294 const Type** fields = TypeTuple::fields(argcnt); 1295 int argp = TypeFunc::Parms; 1296 fields[argp++] = TypePtr::NOTNULL; // obja 1297 fields[argp++] = TypePtr::NOTNULL; // objb 1298 fields[argp++] = TypeInt::INT; // length, number of elements 1299 fields[argp++] = TypeInt::INT; // log2scale, element size 1300 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1301 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1302 1303 //return mismatch index (int) 1304 fields = TypeTuple::fields(1); 1305 fields[TypeFunc::Parms + 0] = TypeInt::INT; 1306 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1307 return TypeFunc::make(domain, range); 1308 } 1309 1310 // GHASH block processing 1311 const TypeFunc* OptoRuntime::ghash_processBlocks_Type() { 1312 int argcnt = 4; 1313 1314 const Type** fields = TypeTuple::fields(argcnt); 1315 int argp = TypeFunc::Parms; 1316 fields[argp++] = TypePtr::NOTNULL; // state 1317 fields[argp++] = TypePtr::NOTNULL; // subkeyH 1318 fields[argp++] = TypePtr::NOTNULL; // data 1319 fields[argp++] = TypeInt::INT; // blocks 1320 assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 1321 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1322 1323 // result type needed 1324 fields = TypeTuple::fields(1); 1325 fields[TypeFunc::Parms+0] = nullptr; // void 1326 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1327 return TypeFunc::make(domain, range); 1328 } 1329 1330 // ChaCha20 Block function 1331 const TypeFunc* OptoRuntime::chacha20Block_Type() { 1332 int argcnt = 2; 1333 1334 const Type** fields = TypeTuple::fields(argcnt); 1335 int argp = TypeFunc::Parms; 1336 fields[argp++] = TypePtr::NOTNULL; // state 1337 fields[argp++] = TypePtr::NOTNULL; // result 1338 1339 assert(argp == TypeFunc::Parms + 
argcnt, "correct decoding"); 1340 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields); 1341 1342 // result type needed 1343 fields = TypeTuple::fields(1); 1344 fields[TypeFunc::Parms + 0] = TypeInt::INT; // key stream outlen as int 1345 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1346 return TypeFunc::make(domain, range); 1347 } 1348 1349 // Base64 encode function 1350 const TypeFunc* OptoRuntime::base64_encodeBlock_Type() { 1351 int argcnt = 6; 1352 1353 const Type** fields = TypeTuple::fields(argcnt); 1354 int argp = TypeFunc::Parms; 1355 fields[argp++] = TypePtr::NOTNULL; // src array 1356 fields[argp++] = TypeInt::INT; // offset 1357 fields[argp++] = TypeInt::INT; // length 1358 fields[argp++] = TypePtr::NOTNULL; // dest array 1359 fields[argp++] = TypeInt::INT; // dp 1360 fields[argp++] = TypeInt::BOOL; // isURL 1361 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1362 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1363 1364 // result type needed 1365 fields = TypeTuple::fields(1); 1366 fields[TypeFunc::Parms + 0] = nullptr; // void 1367 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1368 return TypeFunc::make(domain, range); 1369 } 1370 // Base64 decode function 1371 const TypeFunc* OptoRuntime::base64_decodeBlock_Type() { 1372 int argcnt = 7; 1373 1374 const Type** fields = TypeTuple::fields(argcnt); 1375 int argp = TypeFunc::Parms; 1376 fields[argp++] = TypePtr::NOTNULL; // src array 1377 fields[argp++] = TypeInt::INT; // src offset 1378 fields[argp++] = TypeInt::INT; // src length 1379 fields[argp++] = TypePtr::NOTNULL; // dest array 1380 fields[argp++] = TypeInt::INT; // dest offset 1381 fields[argp++] = TypeInt::BOOL; // isURL 1382 fields[argp++] = TypeInt::BOOL; // isMIME 1383 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1384 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1385 1386 // result type 
needed 1387 fields = TypeTuple::fields(1); 1388 fields[TypeFunc::Parms + 0] = TypeInt::INT; // count of bytes written to dst 1389 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields); 1390 return TypeFunc::make(domain, range); 1391 } 1392 1393 // Poly1305 processMultipleBlocks function 1394 const TypeFunc* OptoRuntime::poly1305_processBlocks_Type() { 1395 int argcnt = 4; 1396 1397 const Type** fields = TypeTuple::fields(argcnt); 1398 int argp = TypeFunc::Parms; 1399 fields[argp++] = TypePtr::NOTNULL; // input array 1400 fields[argp++] = TypeInt::INT; // input length 1401 fields[argp++] = TypePtr::NOTNULL; // accumulator array 1402 fields[argp++] = TypePtr::NOTNULL; // r array 1403 assert(argp == TypeFunc::Parms + argcnt, "correct decoding"); 1404 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 1405 1406 // result type needed 1407 fields = TypeTuple::fields(1); 1408 fields[TypeFunc::Parms + 0] = nullptr; // void 1409 const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 1410 return TypeFunc::make(domain, range); 1411 } 1412 1413 //------------- Interpreter state access for on stack replacement 1414 const TypeFunc* OptoRuntime::osr_end_Type() { 1415 // create input type (domain) 1416 const Type **fields = TypeTuple::fields(1); 1417 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf 1418 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); 1419 1420 // create result type 1421 fields = TypeTuple::fields(1); 1422 // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop 1423 fields[TypeFunc::Parms+0] = nullptr; // void 1424 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); 1425 return TypeFunc::make(domain, range); 1426 } 1427 1428 //------------------------------------------------------------------------------------- 1429 // register policy 1430 1431 bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) { 1432 assert(reg >= 0 && reg < 
_last_Mach_Reg, "must be a machine register");
  // Decode the AD-file register save policy for this machine register.
  switch (register_save_policy[reg]) {
    case 'C': return false; //SOC  save-on-call: caller-saved
    case 'E': return true ; //SOE  save-on-entry: callee-saved
    case 'N': return false; //NS   not saved
    case 'A': return false; //AS   always saved (treated as not callee-saved here)
  }
  ShouldNotReachHere();
  return false;
}

//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// The method is an entry that is always called by a C++ method not
// directly from compiled code. Compiled code will call the C++ method following.
// We can't allow async exception to be installed during  exception processing.
// On return, 'nm' holds the nmethod the handler was looked up in, and the
// returned address is the handler entry (or the deopt blob's exception unpack).
JRT_ENTRY_NO_ASYNC_PROF(address, OptoRuntime, handle_exception_C_helper, OptoRuntime::handle_exception_C_helper(JavaThread* current, nmethod* &nm))
  // The frame we rethrow the exception to might not have been processed by the GC yet.
  // The stack watermark barrier takes care of detecting that and ensuring the frame
  // has updated oops.
  StackWatermarkSet::after_unwind(current);

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling.  DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs checks this on exit.
  assert(current->exception_oop() != nullptr, "exception oop is found");
  address handler_address = nullptr;

  Handle exception(current, current->exception_oop());
  address pc = current->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  current->clear_exception_oop_and_pc();

  // Trace the exception when the 'exceptions' log tag is enabled.
  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(vmClasses::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to old paradigm: search for exception handler in caller_frame
    // instead in exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(current);
    }

    // Check the stack guard pages.  If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !current->stack_overflow_state()->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame deoptee = current->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      // With force_unwind the handler lookup is skipped; compute_compiled_exc_handler
      // below supplies the unwind path instead.
      handler_address =
        force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == nullptr) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert (handler_address != nullptr, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and there didn't happen another exception during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
        }
      } else {
#ifdef ASSERT
        // Cross-check the cached handler against a fresh computation.
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    current->set_exception_pc(pc);
    current->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore correct return pc.  Was saved above.
  current->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note we enter without the usual JRT wrapper. We will call a helper routine that
// will do the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been
// we must not use the handler and instead return the deopt blob.
1580 address OptoRuntime::handle_exception_C(JavaThread* current) { 1581 // 1582 // We are in Java not VM and in debug mode we have a NoHandleMark 1583 // 1584 #ifndef PRODUCT 1585 SharedRuntime::_find_handler_ctr++; // find exception handler 1586 #endif 1587 debug_only(NoHandleMark __hm;) 1588 nmethod* nm = nullptr; 1589 address handler_address = nullptr; 1590 { 1591 // Enter the VM 1592 1593 ResetNoHandleMark rnhm; 1594 handler_address = handle_exception_C_helper(current, nm); 1595 } 1596 1597 // Back in java: Use no oops, DON'T safepoint 1598 1599 // Now check to see if the handler we are returning is in a now 1600 // deoptimized frame 1601 1602 if (nm != nullptr) { 1603 RegisterMap map(current, 1604 RegisterMap::UpdateMap::skip, 1605 RegisterMap::ProcessFrames::skip, 1606 RegisterMap::WalkContinuation::skip); 1607 frame caller = current->last_frame().sender(&map); 1608 #ifdef ASSERT 1609 assert(caller.is_compiled_frame(), "must be"); 1610 #endif // ASSERT 1611 if (caller.is_deoptimized_frame()) { 1612 handler_address = SharedRuntime::deopt_blob()->unpack_with_exception(); 1613 } 1614 } 1615 return handler_address; 1616 } 1617 1618 //------------------------------rethrow---------------------------------------- 1619 // We get here after compiled code has executed a 'RethrowNode'. The callee 1620 // is either throwing or rethrowing an exception. The callee-save registers 1621 // have been restored, synchronized objects have been unlocked and the callee 1622 // stack frame has been removed. The return address was passed in. 1623 // Exception oop is passed as the 1st argument. This routine is then called 1624 // from the stub. On exit, we know where to jump in the caller's code. 1625 // After this C code exits, the stub will pop his frame and end in a jump 1626 // (instead of a return). We enter the caller's default handler. 
1627 // 1628 // This must be JRT_LEAF: 1629 // - caller will not change its state as we cannot block on exit, 1630 // therefore raw_exception_handler_for_return_address is all it takes 1631 // to handle deoptimized blobs 1632 // 1633 // However, there needs to be a safepoint check in the middle! So compiled 1634 // safepoints are completely watertight. 1635 // 1636 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier. 1637 // 1638 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE* 1639 // 1640 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) { 1641 // ret_pc will have been loaded from the stack, so for AArch64 will be signed. 1642 AARCH64_PORT_ONLY(ret_pc = pauth_strip_verifiable(ret_pc)); 1643 1644 #ifndef PRODUCT 1645 SharedRuntime::_rethrow_ctr++; // count rethrows 1646 #endif 1647 assert (exception != nullptr, "should have thrown a NullPointerException"); 1648 #ifdef ASSERT 1649 if (!(exception->is_a(vmClasses::Throwable_klass()))) { 1650 // should throw an exception here 1651 ShouldNotReachHere(); 1652 } 1653 #endif 1654 1655 thread->set_vm_result(exception); 1656 // Frame not compiled (handles deoptimization blob) 1657 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc); 1658 } 1659 1660 1661 const TypeFunc *OptoRuntime::rethrow_Type() { 1662 // create input type (domain) 1663 const Type **fields = TypeTuple::fields(1); 1664 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1665 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1666 1667 // create result type (range) 1668 fields = TypeTuple::fields(1); 1669 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1670 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 1671 1672 return TypeFunc::make(domain, range); 1673 } 1674 1675 1676 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) { 1677 // Deoptimize the caller before continuing, as the 
compiled 1678 // exception handler table may not be valid. 1679 if (!StressCompiledExceptionHandlers && doit) { 1680 deoptimize_caller_frame(thread); 1681 } 1682 } 1683 1684 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) { 1685 // Called from within the owner thread, so no need for safepoint 1686 RegisterMap reg_map(thread, 1687 RegisterMap::UpdateMap::include, 1688 RegisterMap::ProcessFrames::include, 1689 RegisterMap::WalkContinuation::skip); 1690 frame stub_frame = thread->last_frame(); 1691 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1692 frame caller_frame = stub_frame.sender(®_map); 1693 1694 // Deoptimize the caller frame. 1695 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 1696 } 1697 1698 1699 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) { 1700 // Called from within the owner thread, so no need for safepoint 1701 RegisterMap reg_map(thread, 1702 RegisterMap::UpdateMap::include, 1703 RegisterMap::ProcessFrames::include, 1704 RegisterMap::WalkContinuation::skip); 1705 frame stub_frame = thread->last_frame(); 1706 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1707 frame caller_frame = stub_frame.sender(®_map); 1708 return caller_frame.is_deoptimized_frame(); 1709 } 1710 1711 1712 const TypeFunc *OptoRuntime::register_finalizer_Type() { 1713 // create input type (domain) 1714 const Type **fields = TypeTuple::fields(1); 1715 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver 1716 // // The JavaThread* is passed to each routine as the last argument 1717 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread 1718 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1719 1720 // create result type (range) 1721 fields = TypeTuple::fields(0); 1722 1723 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1724 1725 return 
TypeFunc::make(domain,range); 1726 } 1727 1728 const TypeFunc *OptoRuntime::class_init_barrier_Type() { 1729 // create input type (domain) 1730 const Type** fields = TypeTuple::fields(1); 1731 fields[TypeFunc::Parms+0] = TypeKlassPtr::NOTNULL; 1732 // // The JavaThread* is passed to each routine as the last argument 1733 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread 1734 const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields); 1735 1736 // create result type (range) 1737 fields = TypeTuple::fields(0); 1738 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); 1739 return TypeFunc::make(domain,range); 1740 } 1741 1742 #if INCLUDE_JFR 1743 const TypeFunc *OptoRuntime::class_id_load_barrier_Type() { 1744 // create input type (domain) 1745 const Type **fields = TypeTuple::fields(1); 1746 fields[TypeFunc::Parms+0] = TypeInstPtr::KLASS; 1747 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 1, fields); 1748 1749 // create result type (range) 1750 fields = TypeTuple::fields(0); 1751 1752 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms + 0, fields); 1753 1754 return TypeFunc::make(domain,range); 1755 } 1756 #endif 1757 1758 //----------------------------------------------------------------------------- 1759 // Dtrace support. 
entry and exit probes have the same signature 1760 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() { 1761 // create input type (domain) 1762 const Type **fields = TypeTuple::fields(2); 1763 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage 1764 fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering 1765 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 1766 1767 // create result type (range) 1768 fields = TypeTuple::fields(0); 1769 1770 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1771 1772 return TypeFunc::make(domain,range); 1773 } 1774 1775 const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() { 1776 // create input type (domain) 1777 const Type **fields = TypeTuple::fields(2); 1778 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // Thread-local storage 1779 fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object 1780 1781 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); 1782 1783 // create result type (range) 1784 fields = TypeTuple::fields(0); 1785 1786 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1787 1788 return TypeFunc::make(domain,range); 1789 } 1790 1791 1792 JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, register_finalizer, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* current)) 1793 assert(oopDesc::is_oop(obj), "must be a valid oop"); 1794 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise"); 1795 InstanceKlass::register_finalizer(instanceOop(obj), CHECK); 1796 JRT_END 1797 1798 JRT_ENTRY_NO_ASYNC_PROF(void, OptoRuntime, class_init_barrier, OptoRuntime::class_init_barrier(Klass* k, JavaThread* current)) 1799 InstanceKlass* ik = InstanceKlass::cast(k); 1800 if (ik->should_be_initialized()) { 1801 ik->initialize(CHECK); 1802 } else if (UsePerfData) { 1803 _perf_OptoRuntime_class_init_barrier_redundant_count->inc(); 1804 } 1805 JRT_END 1806 1807 
//-----------------------------------------------------------------------------

// Head of the lock-free singly linked list of NamedCounters (see
// new_named_counter below for the CAS-based insertion).
NamedCounter * volatile OptoRuntime::_named_counters = nullptr;

//
// dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
#if INCLUDE_RTM_OPT
    // NOTE: the '}' above closes the 'if' only when RTM support is compiled in;
    // the else-if below extends the same if/else chain.
    } else if (c->tag() == NamedCounter::RTMLockingCounter) {
      RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
      if (rlc->nonzero()) {
        tty->print_cr("%s", c->name());
        rlc->print_on(tty);
      }
#endif
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}

//
// Allocate a new NamedCounter. The JVMState is used to generate the
// name which consists of method@line for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    // Negative bci (e.g. synthetic/unknown position) is clamped to 0 for the name.
    if (bci < 0) bci = 0;
    if (m != nullptr) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c;
  if (tag == NamedCounter::RTMLockingCounter) {
    c = new RTMLockingNamedCounter(st.freeze());
  } else {
    c = new NamedCounter(st.freeze(), tag);
  }

  // atomically add the new counter to the head of the list.  We only
  // add counters so this is safe.
  NamedCounter* head;
  do {
    c->set_next(nullptr);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}

// Monotonic id used to number traced exceptions in the log output.
int trace_exception_counter = 0;

// Format a one-line description of 'exception_oop' thrown at 'exception_pc'
// into 'st' (used by the Info-level 'exceptions' log tag).
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_nmethod()) {
    blob->as_nmethod()->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT,  p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.freeze());
}

// Expands to one 'macro2' (timer + count) or 'macro1' (count only) invocation
// per profiled OptoRuntime entry point.
#define DO_COUNTERS2(macro2, macro1) \
  macro2(OptoRuntime, new_instance_C) \
  macro2(OptoRuntime, new_array_C) \
  macro2(OptoRuntime, new_array_nozero_C) \
  macro2(OptoRuntime, multianewarray2_C) \
  macro2(OptoRuntime, multianewarray3_C) \
macro2(OptoRuntime, multianewarray4_C) \
  macro2(OptoRuntime, multianewarrayN_C) \
  macro2(OptoRuntime, monitor_notify_C) \
  macro2(OptoRuntime, monitor_notifyAll_C) \
  macro2(OptoRuntime, handle_exception_C_helper) \
  macro2(OptoRuntime, register_finalizer) \
  macro2(OptoRuntime, class_init_barrier) \
  macro1(OptoRuntime, class_init_barrier_redundant)

// Creates both the tick-timer and the event counter for one entry point.
#define INIT_COUNTER_TIME_AND_CNT(sub, name) \
  NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

// Creates the event counter only.
#define INIT_COUNTER_CNT(sub, name) \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

// Allocate all OptoRuntime perf counters; exits the VM if allocation fails.
void OptoRuntime::init_counters() {
  assert(CompilerConfig::is_c2_enabled(), "");

  if (UsePerfData) {
    EXCEPTION_MARK;

    DO_COUNTERS2(INIT_COUNTER_TIME_AND_CNT, INIT_COUNTER_CNT)

    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("jvm_perf_init failed unexpectedly");
    }
  }
}
#undef INIT_COUNTER_TIME_AND_CNT
#undef INIT_COUNTER_CNT

// Prints elapsed/thread time and event count for one entry point, but only
// when the counter has fired at least once.
#define PRINT_COUNTER_TIME_AND_CNT(sub, name) { \
  jlong count = _perf_##sub##_##name##_count->get_value(); \
  if (count > 0) { \
    st->print_cr("  %-30s = %4ldms (elapsed) %4ldms (thread) (%5ld events)", #sub "::" #name, \
                 _perf_##sub##_##name##_timer->elapsed_counter_value_ms(), \
                 _perf_##sub##_##name##_timer->thread_counter_value_ms(), \
                 count); \
  }}

// Prints the event count only.
#define PRINT_COUNTER_CNT(sub, name) { \
  jlong count = _perf_##sub##_##name##_count->get_value(); \
  if (count > 0) { \
    st->print_cr("  %-30s = %5ld events", #name, count); \
  }}

// Dump all non-zero OptoRuntime perf counters, or explain why none exist.
void OptoRuntime::print_counters_on(outputStream* st) {
  if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c2_enabled()) {
    DO_COUNTERS2(PRINT_COUNTER_TIME_AND_CNT, PRINT_COUNTER_CNT)
  } else {
    st->print_cr("  OptoRuntime: no info (%s is disabled)",
                 (!CompilerConfig::is_c2_enabled() ? "C2" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
  }
}

#undef PRINT_COUNTER_TIME_AND_CNT
#undef PRINT_COUNTER_CNT
#undef DO_COUNTERS2