461 case vmIntrinsics::_getAndAddShort: return inline_unsafe_load_store(T_SHORT, LS_get_add, Volatile);
462 case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_get_add, Volatile);
463 case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_get_add, Volatile);
464
465 case vmIntrinsics::_getAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_get_set, Volatile);
466 case vmIntrinsics::_getAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_get_set, Volatile);
467 case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_get_set, Volatile);
468 case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_get_set, Volatile);
469 case vmIntrinsics::_getAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);
470
471 case vmIntrinsics::_loadFence:
472 case vmIntrinsics::_storeFence:
473 case vmIntrinsics::_storeStoreFence:
474 case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());
475
476 case vmIntrinsics::_onSpinWait: return inline_onspinwait();
477
478 case vmIntrinsics::_currentCarrierThread: return inline_native_currentCarrierThread();
479 case vmIntrinsics::_currentThread: return inline_native_currentThread();
480 case vmIntrinsics::_setCurrentThread: return inline_native_setCurrentThread();
481
482 case vmIntrinsics::_scopedValueCache: return inline_native_scopedValueCache();
483 case vmIntrinsics::_setScopedValueCache: return inline_native_setScopedValueCache();
484
485 #if INCLUDE_JVMTI
486 case vmIntrinsics::_notifyJvmtiVThreadStart: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_start()),
487 "notifyJvmtiStart", true, false);
488 case vmIntrinsics::_notifyJvmtiVThreadEnd: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_end()),
489 "notifyJvmtiEnd", false, true);
490 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
491 "notifyJvmtiMount", false, false);
492 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
493 "notifyJvmtiUnmount", false, false);
494 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
495 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
496 #endif
497
498 #ifdef JFR_HAVE_INTRINSICS
499 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
500 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
3620
3621 //------------------------inline_native_currentThread------------------
3622 bool LibraryCallKit::inline_native_currentThread() {
3623 Node* junk = nullptr;
3624 set_result(generate_virtual_thread(junk));
3625 return true;
3626 }
3627
//------------------------inline_native_setCurrentThread------------------
// Intrinsic for setting the current thread oop: stores the new thread oop
// into the slot addressed by JavaThread::vthread_offset() of the current
// JavaThread, dereferencing the handle stored there.
bool LibraryCallKit::inline_native_setCurrentThread() {
  assert(C->method()->changes_current_thread(),
         "method changes current Thread but is not annotated ChangesCurrentThread");
  Node* arr = argument(1);  // the new thread oop; argument(0) is the receiver
  Node* thread = _gvn.transform(new ThreadLocalNode());
  // Address of the vthread slot inside the C++ JavaThread (raw, not an oop base).
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
  // Load the handle held in that slot, then store the new thread oop through
  // it. IN_NATIVE: the destination is outside the Java heap, so the store
  // goes through the GC access API rather than a plain heap store.
  Node* thread_obj_handle
    = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
  thread_obj_handle = _gvn.transform(thread_obj_handle);
  const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
  access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
  // Keep JFR's notion of the current thread in sync (no-op without JFR).
  JFR_ONLY(extend_setCurrentThread(thread, arr);)
  return true;
}
3643
3644 const Type* LibraryCallKit::scopedValueCache_type() {
3645 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3646 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3647 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3648
3649 // Because we create the scopedValue cache lazily we have to make the
3650 // type of the result BotPTR.
3651 bool xk = etype->klass_is_exact();
3652 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3653 return objects_type;
3654 }
3655
3656 Node* LibraryCallKit::scopedValueCache_helper() {
3657 Node* thread = _gvn.transform(new ThreadLocalNode());
3658 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3659 // We cannot use immutable_memory() because we might flip onto a
3660 // different carrier thread, at which point we'll need to use that
3661 // carrier thread's cache.
|
461 case vmIntrinsics::_getAndAddShort: return inline_unsafe_load_store(T_SHORT, LS_get_add, Volatile);
462 case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_get_add, Volatile);
463 case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_get_add, Volatile);
464
465 case vmIntrinsics::_getAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_get_set, Volatile);
466 case vmIntrinsics::_getAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_get_set, Volatile);
467 case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_get_set, Volatile);
468 case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_get_set, Volatile);
469 case vmIntrinsics::_getAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);
470
471 case vmIntrinsics::_loadFence:
472 case vmIntrinsics::_storeFence:
473 case vmIntrinsics::_storeStoreFence:
474 case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());
475
476 case vmIntrinsics::_onSpinWait: return inline_onspinwait();
477
478 case vmIntrinsics::_currentCarrierThread: return inline_native_currentCarrierThread();
479 case vmIntrinsics::_currentThread: return inline_native_currentThread();
480 case vmIntrinsics::_setCurrentThread: return inline_native_setCurrentThread();
481 case vmIntrinsics::_setLockId: return inline_native_setLockId();
482
483 case vmIntrinsics::_scopedValueCache: return inline_native_scopedValueCache();
484 case vmIntrinsics::_setScopedValueCache: return inline_native_setScopedValueCache();
485
486 #if INCLUDE_JVMTI
487 case vmIntrinsics::_notifyJvmtiVThreadStart: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_start()),
488 "notifyJvmtiStart", true, false);
489 case vmIntrinsics::_notifyJvmtiVThreadEnd: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_end()),
490 "notifyJvmtiEnd", false, true);
491 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
492 "notifyJvmtiMount", false, false);
493 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
494 "notifyJvmtiUnmount", false, false);
495 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
496 case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
497 #endif
498
499 #ifdef JFR_HAVE_INTRINSICS
500 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
501 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
3621
3622 //------------------------inline_native_currentThread------------------
3623 bool LibraryCallKit::inline_native_currentThread() {
3624 Node* junk = nullptr;
3625 set_result(generate_virtual_thread(junk));
3626 return true;
3627 }
3628
3629 //------------------------inline_native_setVthread------------------
3630 bool LibraryCallKit::inline_native_setCurrentThread() {
3631 assert(C->method()->changes_current_thread(),
3632 "method changes current Thread but is not annotated ChangesCurrentThread");
3633 Node* arr = argument(1);
3634 Node* thread = _gvn.transform(new ThreadLocalNode());
3635 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3636 Node* thread_obj_handle
3637 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3638 thread_obj_handle = _gvn.transform(thread_obj_handle);
3639 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3640 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3641
3642 // Change the lock_id of the JavaThread
3643 Node* tid = load_field_from_object(arr, "tid", "J");
3644 Node* thread_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::lock_id_offset()));
3645 Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, Compile::AliasIdxRaw, MemNode::unordered, true);
3646
3647 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3648 return true;
3649 }
3650
3651 bool LibraryCallKit::inline_native_setLockId() {
3652 Node* thread = _gvn.transform(new ThreadLocalNode());
3653 Node* thread_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::lock_id_offset()));
3654 Node* tid_memory = store_to_memory(control(), thread_id_offset, ConvL2X(argument(0)), T_LONG, Compile::AliasIdxRaw, MemNode::unordered, true);
3655 return true;
3656 }
3657
3658 const Type* LibraryCallKit::scopedValueCache_type() {
3659 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3660 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3661 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3662
3663 // Because we create the scopedValue cache lazily we have to make the
3664 // type of the result BotPTR.
3665 bool xk = etype->klass_is_exact();
3666 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3667 return objects_type;
3668 }
3669
3670 Node* LibraryCallKit::scopedValueCache_helper() {
3671 Node* thread = _gvn.transform(new ThreadLocalNode());
3672 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3673 // We cannot use immutable_memory() because we might flip onto a
3674 // different carrier thread, at which point we'll need to use that
3675 // carrier thread's cache.
|