src/hotspot/share/opto/library_call.cpp

*** 57,10 ***
--- 57,11 ---
  #include "utilities/macros.hpp"
  #include "utilities/powerOfTwo.hpp"
  
  #if INCLUDE_JFR
  #include "jfr/jfr.hpp"
+ #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp" // FIXME
  #endif
  
  //---------------------------make_vm_intrinsic----------------------------
  CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
    vmIntrinsicID id = m->intrinsic_id();

*** 468,15 ***
    case vmIntrinsics::_storeFence:
    case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
  
    case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
  
    case vmIntrinsics::_currentThread:            return inline_native_currentThread();
  
  #ifdef JFR_HAVE_INTRINSICS
    case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
-   case vmIntrinsics::_getClassId:               return inline_native_classID();
    case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
  #endif
    case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
    case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
    case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
--- 469,19 ---
    case vmIntrinsics::_storeFence:
    case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
  
    case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
  
+   case vmIntrinsics::_currentThread0:           return inline_native_currentThread0();
    case vmIntrinsics::_currentThread:            return inline_native_currentThread();
+   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
+ 
+   case vmIntrinsics::_scopeLocalCache:          return inline_native_scopeLocalCache();
+   case vmIntrinsics::_setScopeLocalCache:       return inline_native_setScopeLocalCache();
  
  #ifdef JFR_HAVE_INTRINSICS
    case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
    case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
  #endif
    case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
    case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
    case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();

*** 622,10 ***
--- 627,13 ---
  
    case vmIntrinsics::_fmaD:
    case vmIntrinsics::_fmaF:
      return inline_fma(intrinsic_id());
  
+   case vmIntrinsics::_Continuation_doYield:
+     return inline_continuation_do_yield();
+ 
    case vmIntrinsics::_isDigit:
    case vmIntrinsics::_isLowerCase:
    case vmIntrinsics::_isUpperCase:
    case vmIntrinsics::_isWhitespace:
      return inline_character_compare(intrinsic_id());

*** 866,22 ***
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }
  }
  
! //--------------------------generate_current_thread--------------------
! Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
!   ciKlass*    thread_klass = env()->Thread_klass();
!   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
    Node* thread = _gvn.transform(new ThreadLocalNode());
!   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
    tls_output = thread;
!   Node* thread_obj_handle = LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
    thread_obj_handle = _gvn.transform(thread_obj_handle);
!   return access_load(thread_obj_handle, thread_type, T_OBJECT, IN_NATIVE | C2_IMMUTABLE_MEMORY);
  }
  
  
  //------------------------------make_string_method_node------------------------
  // Helper method for String intrinsic functions. This version is called with
  // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
  // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
--- 874,45 ---
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }
  }
  
! Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
!                                             bool is_immutable) {
!   ciKlass* thread_klass = env()->Thread_klass();
!   const Type* thread_type
+     = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
+ 
    Node* thread = _gvn.transform(new ThreadLocalNode());
!   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(handle_offset));
    tls_output = thread;
! 
+   Node* thread_obj_handle
+     = (is_immutable
+       ? LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
+         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
+       : make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
    thread_obj_handle = _gvn.transform(thread_obj_handle);
! 
+   DecoratorSet decorators = IN_NATIVE;
+   if (is_immutable) {
+     decorators |= C2_IMMUTABLE_MEMORY;
+   }
+   return access_load(thread_obj_handle, thread_type, T_OBJECT, decorators);
  }
  
+ //--------------------------generate_current_thread--------------------
+ Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
+   return current_thread_helper(tls_output, JavaThread::threadObj_offset(),
+                                /*is_immutable*/false);
+ }
+ 
+ //--------------------------generate_virtual_thread--------------------
+ Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) {
+   return current_thread_helper(tls_output, JavaThread::vthread_offset(),
+                                !C->method()->changes_current_thread());
+ }
  
  //------------------------------make_string_method_node------------------------
  // Helper method for String intrinsic functions. This version is called with
  // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
  // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes

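For orientation, the IR that current_thread_helper() emits amounts to a double indirection: load the raw handle (an oop*) stored at handle_offset inside the current JavaThread, then load the thread oop through that handle with an IN_NATIVE access so GC barriers apply. A rough C++ sketch of that shape (illustrative only; the helper name and the null handling below are not part of the patch):

  // Conceptual equivalent of the IR built by current_thread_helper(); the
  // intrinsic performs these two loads as C2 nodes and routes the second one
  // through access_load() so the GC barrier set can instrument it.
  static oop resolve_thread_handle(JavaThread* thread, ByteSize handle_offset) {
    // First load (T_ADDRESS): the oop* held by the handle field at handle_offset.
    oop* handle = *reinterpret_cast<oop**>(
        reinterpret_cast<char*>(thread) + in_bytes(handle_offset));
    // Second load (T_OBJECT, IN_NATIVE): the Thread/VirtualThread oop itself.
    return (handle != nullptr) ? *handle : nullptr;
  }
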
*** 2892,53 ***
    set_result(ideal.value(result));
  #undef __
    return true;
  }
  
  bool LibraryCallKit::inline_native_getEventWriter() {
    Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
  
!   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
!                                   in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
  
    Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  
!   Node* jobj_cmp_null = _gvn.transform( new CmpPNode(jobj, null()) );
!   Node* test_jobj_eq_null  = _gvn.transform( new BoolNode(jobj_cmp_null, BoolTest::eq) );
  
!   IfNode* iff_jobj_null =
!     create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
  
!   enum { _normal_path = 1,
!          _null_path = 2,
!          PATH_LIMIT };
  
!   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
!   PhiNode*    result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
  
!   Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
!   result_rgn->init_req(_null_path, jobj_is_null);
!   result_val->init_req(_null_path, null());
  
!   Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
!   set_control(jobj_is_not_null);
!   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
!                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
!   result_rgn->init_req(_normal_path, control());
!   result_val->init_req(_normal_path, res);
  
!   set_result(result_rgn, result_val);
  
    return true;
  }
  
! #endif // JFR_HAVE_INTRINSICS
  
  //------------------------inline_native_currentThread------------------
  bool LibraryCallKit::inline_native_currentThread() {
    Node* junk = NULL;
!   set_result(generate_current_thread(junk));
    return true;
  }
  
  //---------------------------load_mirror_from_klass----------------------------
  // Given a klass oop, load its java mirror (a java.lang.Class oop).
--- 2923,346 ---
    set_result(ideal.value(result));
  #undef __
    return true;
  }
  
+ /*
+     jobject h_event_writer = Thread::jfr_thread_local()->java_event_writer();
+     if (h_event_writer == NULL) {
+       return NULL;
+     }
+     oop threadObj = Thread::threadObj();
+     oop vthread = java_lang_Thread::vthread(threadObj);
+     traceid tid;
+     if (vthread != threadObj) {  // i.e. current thread is virtual
+       traceid value = java_lang_VirtualThread::tid(vthread);
+       tid = value & tid_mask;
+       traceid epoch = value >> epoch_shift;
+       traceid current_epoch = JfrTraceIdEpoch::current_generation();
+       if (epoch != current_epoch) {
+         traceid update_value = current_epoch << epoch_shift;
+         update_value |= tid;
+         java_lang_VirtualThread::set_tid(vthread, update_value);
+         write_checkpoint(tid);
+       }
+     } else {
+       tid = java_lang_Thread::tid(threadObj);
+     }
+     oop event_writer = JNIHandles::resolve_non_null(h_event_writer);
+     traceid tid_in_event_writer = getField(event_writer, "threadID");
+     if (tid_in_event_writer != tid) {
+       setField(event_writer, "threadID", tid);
+     }
+     return event_writer;
+ 
+  */
  bool LibraryCallKit::inline_native_getEventWriter() {
+   enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
+ 
+   // save input memory and i_o state
+   Node* input_memory_state = reset_memory();
+   set_all_memory(input_memory_state);
+   Node* input_io_state = i_o();
+ 
+   // TLS
    Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
  
!   // compute the address of the event writer handle within the jfr_thread_local
!   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
  
+   // Load the event writer jobject handle from the jfr_thread_local
    Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  
!   // Null check the jobject handle
!   Node* jobj_cmp_null = _gvn.transform(new CmpPNode(jobj, null()));
+   Node* test_jobj_ne_null = _gvn.transform(new BoolNode(jobj_cmp_null, BoolTest::ne));
+   IfNode* iff_jobj_ne_null = create_and_map_if(control(), test_jobj_ne_null, PROB_MAX, COUNT_UNKNOWN);
+ 
+   // false path, jobj is null
+   Node* jobj_is_null = _gvn.transform(new IfFalseNode(iff_jobj_ne_null));
+ 
+   // true path, jobj is not null
+   Node* jobj_is_not_null = _gvn.transform(new IfTrueNode(iff_jobj_ne_null));
+ 
+   // load the threadObj for the CarrierThread
+   Node* const threadObj = generate_current_thread(tls_ptr);
+ 
+   // load the vthread field
+   Node* const vthreadObj = generate_virtual_thread(tls_ptr);
+ 
+   // vthread != threadObj
+   RegionNode* threadObj_result_rgn = new RegionNode(PATH_LIMIT);
+   record_for_igvn(threadObj_result_rgn);
+   PhiNode*    thread_id_mem = new PhiNode(threadObj_result_rgn, Type::MEMORY, TypePtr::BOTTOM);
+   PhiNode*    thread_id_io = new PhiNode(threadObj_result_rgn, Type::ABIO);
+   record_for_igvn(thread_id_io);
+   PhiNode*    thread_id_val = new PhiNode(threadObj_result_rgn, TypeLong::LONG);
+   record_for_igvn(thread_id_val);
+ 
+   // If vthread != thread, this is a virtual thread
+   Node* vthreadObj_cmp_threadObj = _gvn.transform(new CmpPNode(vthreadObj, threadObj));
+   Node* test_vthreadObj_ne_threadObj = _gvn.transform(new BoolNode(vthreadObj_cmp_threadObj, BoolTest::ne));
+   IfNode* iff_vthreadObj_ne_threadObj =
+     create_and_map_if(jobj_is_not_null, test_vthreadObj_ne_threadObj, PROB_FAIR, COUNT_UNKNOWN);
+ 
+   // false branch, fallback to threadObj
+   Node* virtual_thread_is_threadObj = _gvn.transform(new IfFalseNode(iff_vthreadObj_ne_threadObj));
+   Node* thread_obj_tid = load_field_from_object(threadObj, "tid", "J");
+ 
+   // true branch, this is a virtual thread
+   Node* virtual_thread_is_not_threadObj = _gvn.transform(new IfTrueNode(iff_vthreadObj_ne_threadObj));
+   // read the thread id from the vthread
+   Node* vthread_obj_tid_value = load_field_from_object(vthreadObj, "tid", "J");
+ 
+   // bit shift and mask
+   Node* const epoch_shift = _gvn.intcon(jfr_epoch_shift);
+   Node* const tid_mask = _gvn.longcon(jfr_id_mask);
+ 
+   // mask off the epoch information from the thread id
+   Node* const vthread_obj_tid = _gvn.transform(new AndLNode(vthread_obj_tid_value, tid_mask));
+   // shift thread id value down for last epoch
+   Node* const vthread_epoch = _gvn.transform(new URShiftLNode(vthread_obj_tid_value, epoch_shift));
+ 
+   // epoch compare
+   RegionNode* epoch_compare_rgn = new RegionNode(PATH_LIMIT);
+   record_for_igvn(epoch_compare_rgn);
+   PhiNode*    epoch_compare_mem = new PhiNode(epoch_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
+   record_for_igvn(epoch_compare_mem);
+   PhiNode*    epoch_compare_io = new PhiNode(epoch_compare_rgn, Type::ABIO);
+   record_for_igvn(epoch_compare_io);
+ 
+   Node* saved_ctl = control();
+   set_control(virtual_thread_is_not_threadObj);
+   TypePtr* const no_memory_effects = NULL;
+   // make a runtime call to get the current epoch
+   Node* call_epoch_generation = make_runtime_call(RC_LEAF | RC_NO_FP,
+                                                   OptoRuntime::void_long_Type(),
+                                                   (address)JFR_EPOCH_GENERATION_FUNCTION,
+                                                   "epoch_generation", no_memory_effects);
+   // restore
+   set_control(saved_ctl);
+ 
+   Node* current_epoch_gen_control = _gvn.transform(new ProjNode(call_epoch_generation, TypeFunc::Control));
+   Node* current_epoch_gen_value = _gvn.transform(new ProjNode(call_epoch_generation, TypeFunc::Parms));
+ 
+   // compare epoch in vthread to the current epoch generation
+   Node* const epoch_cmp = _gvn.transform(new CmpLNode(current_epoch_gen_value, vthread_epoch));
+   Node* test_epoch_ne = _gvn.transform(new BoolNode(epoch_cmp, BoolTest::ne));
+   IfNode* iff_epoch_ne = create_and_map_if(current_epoch_gen_control, test_epoch_ne, PROB_FAIR, COUNT_UNKNOWN);
+ 
+   // true path, epochs are not equal, there is a need to write a checkpoint for the vthread
+   Node* epoch_is_not_equal = _gvn.transform(new IfTrueNode(iff_epoch_ne));
+   // get the field offset for storing an updated tid and epoch value
+   Node* const tid_field_address = field_address_from_object(vthreadObj, "tid", "J", false);
+   const TypePtr* tid_field_address_type = _gvn.type(tid_field_address)->isa_ptr();
+ 
+   // shift up current epoch generation value
+   Node* left_shifted_current_epoch_gen = _gvn.transform(new LShiftLNode(current_epoch_gen_value, epoch_shift));
+   // OR the shifted epoch generation value with the threadid
+   Node* current_epoch_gen_and_tid = _gvn.transform(new OrLNode(vthread_obj_tid, left_shifted_current_epoch_gen));
+   // store back the current_epoch_gen_and_tid into the vthreadObject
+   Node* vthreadObj_epoch_gen_memory_store = store_to_memory(epoch_is_not_equal,
+                                                             tid_field_address,
+                                                             current_epoch_gen_and_tid,
+                                                             T_LONG,
+                                                             tid_field_address_type,
+                                                             MemNode::unordered);
+ 
+   // call out to the VM in order to write a checkpoint for the vthread
+   saved_ctl = control();
+   set_control(epoch_is_not_equal);
+   // call can safepoint
+   Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
+                                                   OptoRuntime::jfr_write_checkpoint_Type(),
+                                                   StubRoutines::jfr_write_checkpoint(),
+                                                   "write_checkpoint", TypePtr::BOTTOM, vthread_obj_tid, top());
+   // restore
+   set_control(saved_ctl);
+   Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));
+ 
+   // false path, epochs are the same, no need to write new checkpoint information
+   Node* epoch_is_equal = _gvn.transform(new IfFalseNode(iff_epoch_ne));
+ 
+   // need memory and IO
+   epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control);
+   epoch_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
+   epoch_compare_io->init_req(_true_path, i_o());
+   epoch_compare_rgn->init_req(_false_path, epoch_is_equal);
+   epoch_compare_mem->init_req(_false_path, input_memory_state);
+   epoch_compare_io->init_req(_false_path, input_io_state);
+ 
+   // merge the threadObj branch
+   threadObj_result_rgn->init_req(_true_path, _gvn.transform(epoch_compare_rgn));
+   threadObj_result_rgn->init_req(_false_path, virtual_thread_is_threadObj);
+   thread_id_mem->init_req(_true_path, _gvn.transform(epoch_compare_mem));
+   thread_id_mem->init_req(_false_path, input_memory_state);
+   thread_id_io->init_req(_true_path, _gvn.transform(epoch_compare_io));
+   thread_id_io->init_req(_false_path, input_io_state);
+   thread_id_val->init_req(_true_path, _gvn.transform(vthread_obj_tid));
+   thread_id_val->init_req(_false_path, _gvn.transform(thread_obj_tid));
+ 
+   // update memory and io state
+   set_all_memory(_gvn.transform(thread_id_mem));
+   set_i_o(_gvn.transform(thread_id_io));
+ 
+   // load the event writer oop by dereferencing the jobject handle
+   saved_ctl = control();
+   set_control(_gvn.transform(threadObj_result_rgn));
+   ciKlass* klass_EventWriter = env()->find_system_klass(ciSymbol::make("jdk/jfr/internal/EventWriter"));
+   assert(klass_EventWriter->is_loaded(), "invariant");
+   ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass();
+   const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter);
+   const TypeOopPtr* const xtype = aklass->as_instance_type();
+   Node* event_writer = access_load(jobj, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
+   // restore
+   set_control(saved_ctl);
+ 
+   // load the current thread id from the event writer object
+   Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
+   // get the field offset to store an updated tid value later (conditionally)
+   Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
+   const TypePtr* event_writer_tid_field_type = _gvn.type(event_writer_tid_field)->isa_ptr();
+ 
+   // thread id compare
+   RegionNode* tid_compare_rgn = new RegionNode(PATH_LIMIT);
+   record_for_igvn(tid_compare_rgn);
+   PhiNode*    tid_compare_mem = new PhiNode(tid_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
+   record_for_igvn(tid_compare_mem);
+   PhiNode*    tid_compare_io = new PhiNode(tid_compare_rgn, Type::ABIO);
+   record_for_igvn(tid_compare_io);
+ 
+   // compare current tid to what is stored in the event writer object
+   Node* const tid_cmp = _gvn.transform(new CmpLNode(event_writer_tid, _gvn.transform(thread_id_val)));
+   Node* test_tid_ne = _gvn.transform(new BoolNode(tid_cmp, BoolTest::ne));
+   IfNode* iff_tid_ne = create_and_map_if(_gvn.transform(threadObj_result_rgn), test_tid_ne, PROB_FAIR, COUNT_UNKNOWN);
+ 
+   // true path, tid not equal, need to store tid value to the event writer
+   Node* tid_is_not_equal = _gvn.transform(new IfTrueNode(iff_tid_ne));
+   record_for_igvn(tid_is_not_equal);
+   // update the event writer with the current thread id value
+   Node* event_writer_tid_memory_store = store_to_memory(tid_is_not_equal,
+                                                         event_writer_tid_field,
+                                                         thread_id_val,
+                                                         T_LONG,
+                                                         event_writer_tid_field_type,
+                                                         MemNode::unordered);
+ 
+   // false path, tids are the same, no update
+   Node* tid_is_equal = _gvn.transform(new IfFalseNode(iff_tid_ne));
+ 
+   // update controls
+   tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
+   tid_compare_rgn->init_req(_false_path, tid_is_equal);
+ 
+   // update memory phi node
+   tid_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
+   tid_compare_mem->init_req(_false_path, _gvn.transform(thread_id_mem));
+ 
+   // update io phi node
+   tid_compare_io->init_req(_true_path, _gvn.transform(i_o()));
+   tid_compare_io->init_req(_false_path, _gvn.transform(thread_id_io));
+ 
+   // result of top level CFG, Memory, IO and Value
+   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
+   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
+   PhiNode*    result_io = new PhiNode(result_reg, Type::ABIO);
+   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::BOTTOM);
  
!   // result control
!   result_reg->init_req(_true_path, _gvn.transform(tid_compare_rgn));
+   result_reg->init_req(_false_path, jobj_is_null);
  
!   // result memory
!   result_mem->init_req(_true_path, _gvn.transform(tid_compare_mem));
!   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
  
!   // result io
!   result_io->init_req(_true_path, _gvn.transform(tid_compare_io));
+   result_io->init_req(_false_path, _gvn.transform(input_io_state));
  
!   // result values
!   result_val->init_req(_true_path, _gvn.transform(event_writer)); // return event writer oop
!   result_val->init_req(_false_path, null()); // return NULL
  
!   // set output state
!   set_all_memory(_gvn.transform(result_mem));
!   set_i_o(_gvn.transform(result_io));
!   set_result(result_reg, result_val);
!   return true;
! }
  
! #endif // JFR_HAVE_INTRINSICS
  
+ //------------------------inline_native_currentThread0------------------
+ bool LibraryCallKit::inline_native_currentThread0() {
+   Node* junk = NULL;
+   set_result(generate_current_thread(junk));
    return true;
  }
  
! Node* LibraryCallKit::scopeLocalCache_helper() {
+   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
+   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
+ 
+   bool xk = etype->klass_is_exact();
+ 
+   Node* thread = _gvn.transform(new ThreadLocalNode());
+   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopeLocalCache_offset()));
+   return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
+         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
+ }
+ 
+ //------------------------inline_native_scopeLocalCache------------------
+ bool LibraryCallKit::inline_native_scopeLocalCache() {
+   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
+   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
+   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
+ 
+   // Because we create the scopeLocal cache lazily we have to make the
+   // type of the result BotPTR.
+   bool xk = etype->klass_is_exact();
+   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
+   Node* cache_obj_handle = scopeLocalCache_helper();
+   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
+ 
+   return true;
+ }
+ 
+ //------------------------inline_native_setScopeLocalCache------------------
+ bool LibraryCallKit::inline_native_setScopeLocalCache() {
+   Node* arr = argument(0);
+   Node* cache_obj_handle = scopeLocalCache_helper();
+ 
+   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
+   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
+                   MemNode::unordered);
+ 
+   return true;
+ }
  
  //------------------------inline_native_currentThread------------------
  bool LibraryCallKit::inline_native_currentThread() {
    Node* junk = NULL;
!   set_result(generate_virtual_thread(junk));
+   return true;
+ }
+ 
+ //------------------------inline_native_setCurrentThread------------------
+ bool LibraryCallKit::inline_native_setCurrentThread() {
+   assert(C->method()->changes_current_thread(),
+          "method changes current Thread but is not annotated ChangesCurrentThread");
+   Node* arr = argument(1);
+   Node* thread = _gvn.transform(new ThreadLocalNode());
+   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
+   Node* thread_obj_handle
+     = make_load(NULL, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
+   thread_obj_handle = _gvn.transform(thread_obj_handle);
+   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
+   // Stores of oops to native memory not supported yet by BarrierSetC2::store_at_resolved
+   // access_store_at(NULL, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
+   store_to_memory(control(), thread_obj_handle, arr, T_OBJECT, adr_type, MemNode::unordered);
+ 
    return true;
  }
  
  //---------------------------load_mirror_from_klass----------------------------
  // Given a klass oop, load its java mirror (a java.lang.Class oop).

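The epoch handling in inline_native_getEventWriter() above depends on the packed tid field of java.lang.VirtualThread described in the pseudocode comment: the low bits carry the trace id, the high bits carry the epoch generation in which the vthread last wrote a thread checkpoint. A minimal sketch of that encoding, reusing the jfr_epoch_shift / jfr_id_mask names the intrinsic references (the concrete values below are placeholders, not the ones defined in jfrTraceIdMacros.hpp):

  #include <cstdint>

  // Placeholder constants for illustration; the real values come from
  // jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp.
  static const uint64_t jfr_epoch_shift = 48;
  static const uint64_t jfr_id_mask     = (uint64_t(1) << jfr_epoch_shift) - 1;

  // Decode the packed field: trace id in the low bits, epoch in the high bits.
  static inline uint64_t tid_of(uint64_t packed)   { return packed & jfr_id_mask; }
  static inline uint64_t epoch_of(uint64_t packed) { return packed >> jfr_epoch_shift; }

  // Re-encode with the current epoch generation, as the "epochs are not equal"
  // path does before calling out to write_checkpoint().
  static inline uint64_t with_current_epoch(uint64_t packed, uint64_t current_epoch) {
    return (current_epoch << jfr_epoch_shift) | tid_of(packed);
  }
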
*** 5798,12 ***
    return true;
  }
  
  
  Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
!                                              DecoratorSet decorators = IN_HEAP, bool is_static = false,
!                                              ciInstanceKlass* fromKls = NULL) {
    if (fromKls == NULL) {
      const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
      assert(tinst != NULL, "obj is null");
      assert(tinst->klass()->is_loaded(), "obj is not loaded");
      fromKls = tinst->klass()->as_instance_klass();
--- 6122,12 ---
    return true;
  }
  
  
  Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
!                                              DecoratorSet decorators, bool is_static,
!                                              ciInstanceKlass* fromKls) {
    if (fromKls == NULL) {
      const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
      assert(tinst != NULL, "obj is null");
      assert(tinst->klass()->is_loaded(), "obj is not loaded");
      fromKls = tinst->klass()->as_instance_klass();

*** 5812,11 ***
    }
    ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
                                                ciSymbol::make(fieldTypeString),
                                                is_static);
  
!   assert (field != NULL, "undefined field");
    if (field == NULL) return (Node *) NULL;
  
    if (is_static) {
      const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
      fromObj = makecon(tip);
--- 6136,11 ---
    }
    ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
                                                ciSymbol::make(fieldTypeString),
                                                is_static);
  
!   assert (field != NULL, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName);
    if (field == NULL) return (Node *) NULL;
  
    if (is_static) {
      const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
      fromObj = makecon(tip);

*** 5847,12 ***
  
    return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
  }
  
  Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
!                                                  bool is_exact = true, bool is_static = false,
!                                                  ciInstanceKlass * fromKls = NULL) {
    if (fromKls == NULL) {
      const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
      assert(tinst != NULL, "obj is null");
      assert(tinst->klass()->is_loaded(), "obj is not loaded");
      assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
--- 6171,12 ---
  
    return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
  }
  
  Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
!                                                  bool is_exact, bool is_static,
!                                                  ciInstanceKlass * fromKls) {
    if (fromKls == NULL) {
      const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
      assert(tinst != NULL, "obj is null");
      assert(tinst->klass()->is_loaded(), "obj is not loaded");
      assert(!is_exact || tinst->klass_is_exact(), "klass not exact");

*** 6952,10 ***
--- 7276,19 ---
    Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
  
    return instof_false;  // even if it is NULL
  }
  
+ bool LibraryCallKit::inline_continuation_do_yield() {
+   address call_addr = StubRoutines::cont_doYield();
+   const TypeFunc* tf = OptoRuntime::continuation_doYield_Type();
+   Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "doYield", TypeRawPtr::BOTTOM);
+   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+   set_result(result);
+   return true;
+ }
+ 
  //-------------inline_fma-----------------------------------
  bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
    Node *a = NULL;
    Node *b = NULL;
    Node *c = NULL;