src/hotspot/share/opto/library_call.cpp

  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"
  49 #include "opto/parse.hpp"
  50 #include "opto/runtime.hpp"
  51 #include "opto/rootnode.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "prims/unsafe.hpp"
  54 #include "runtime/objectMonitor.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "utilities/macros.hpp"
  58 #include "utilities/powerOfTwo.hpp"
  59 
  60 #if INCLUDE_JFR
  61 #include "jfr/jfr.hpp"
  62 #endif
  63 
  64 //---------------------------make_vm_intrinsic----------------------------
  65 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  66   vmIntrinsicID id = m->intrinsic_id();
  67   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  68 
  69   if (!m->is_loaded()) {
  70     // Do not attempt to inline unloaded methods.
  71     return NULL;
  72   }
  73 
  74   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  75   bool is_available = false;
  76 
  77   {
  78     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  79     // the compiler must transition to '_thread_in_vm' state because both
  80     // methods access VM-internal data.
  81     VM_ENTRY_MARK;

 453   case vmIntrinsics::_compareAndExchangeLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
 454   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 455 
 456   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 457   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 458   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 459   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 460 
 461   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 462   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 463   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 464   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 465   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 466 
 467   case vmIntrinsics::_loadFence:
 468   case vmIntrinsics::_storeFence:
 469   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 470 
 471   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 472 
 473   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 474 
 475 #ifdef JFR_HAVE_INTRINSICS
 476   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
 477   case vmIntrinsics::_getClassId:               return inline_native_classID();
 478   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 479 #endif
 480   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 481   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 482   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 483   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 484   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 485   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 486   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 487   case vmIntrinsics::_getLength:                return inline_native_getLength();
 488   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 489   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 490   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 491   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 492   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 493   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 494   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 495 
 496   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 497   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
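
Each case above forwards to a small inline_* helper on LibraryCallKit. As a hedged illustration of the shape of such a helper, here is a minimal sketch of what the fence helper could look like; it assumes insert_mem_bar() and the LoadFence/StoreFence/MemBarVolatile ideal opcodes, and is not copied from this page:

bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
  switch (id) {
  case vmIntrinsics::_loadFence:
    insert_mem_bar(Op_LoadFence);       // LoadLoad|LoadStore barrier
    return true;
  case vmIntrinsics::_storeFence:
    insert_mem_bar(Op_StoreFence);      // StoreStore|LoadStore barrier
    return true;
  case vmIntrinsics::_fullFence:
    insert_mem_bar(Op_MemBarVolatile);  // full two-way fence
    return true;
  default:
    fatal_unexpected_iid(id);
    return false;
  }
}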

 607   case vmIntrinsics::_updateDirectByteBufferCRC32C:
 608     return inline_updateDirectByteBufferCRC32C();
 609 
 610   case vmIntrinsics::_updateBytesAdler32:
 611     return inline_updateBytesAdler32();
 612   case vmIntrinsics::_updateByteBufferAdler32:
 613     return inline_updateByteBufferAdler32();
 614 
 615   case vmIntrinsics::_profileBoolean:
 616     return inline_profileBoolean();
 617   case vmIntrinsics::_isCompileConstant:
 618     return inline_isCompileConstant();
 619 
 620   case vmIntrinsics::_hasNegatives:
 621     return inline_hasNegatives();
 622 
 623   case vmIntrinsics::_fmaD:
 624   case vmIntrinsics::_fmaF:
 625     return inline_fma(intrinsic_id());
 626 
 627   case vmIntrinsics::_isDigit:
 628   case vmIntrinsics::_isLowerCase:
 629   case vmIntrinsics::_isUpperCase:
 630   case vmIntrinsics::_isWhitespace:
 631     return inline_character_compare(intrinsic_id());
 632 
 633   case vmIntrinsics::_maxF:
 634   case vmIntrinsics::_minF:
 635   case vmIntrinsics::_maxD:
 636   case vmIntrinsics::_minD:
 637     return inline_fp_min_max(intrinsic_id());
 638 
 639   case vmIntrinsics::_VectorUnaryOp:
 640     return inline_vector_nary_operation(1);
 641   case vmIntrinsics::_VectorBinaryOp:
 642     return inline_vector_nary_operation(2);
 643   case vmIntrinsics::_VectorTernaryOp:
 644     return inline_vector_nary_operation(3);
 645   case vmIntrinsics::_VectorBroadcastCoerced:
 646     return inline_vector_broadcast_coerced();

 851   record_for_igvn(bailout);
 852   if (char_count) {
 853     // Convert char count to byte count
 854     count = _gvn.transform(new LShiftINode(count, intcon(1)));
 855   }
 856 
 857   // Offset and count must not be negative
 858   generate_negative_guard(offset, bailout);
 859   generate_negative_guard(count, bailout);
 860   // Offset + count must not exceed length of array
 861   generate_limit_guard(offset, count, load_array_length(array), bailout);
 862 
 863   if (bailout->req() > 1) {
 864     PreserveJVMState pjvms(this);
 865     set_control(_gvn.transform(bailout));
 866     uncommon_trap(Deoptimization::Reason_intrinsic,
 867                   Deoptimization::Action_maybe_recompile);
 868   }
 869 }
 870 
 871 //--------------------------generate_current_thread--------------------
 872 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
 873   ciKlass*    thread_klass = env()->Thread_klass();
 874   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
 875   Node* thread = _gvn.transform(new ThreadLocalNode());
 876   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
 877   tls_output = thread;
 878   Node* thread_obj_handle = LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
 879   thread_obj_handle = _gvn.transform(thread_obj_handle);
 880   return access_load(thread_obj_handle, thread_type, T_OBJECT, IN_NATIVE | C2_IMMUTABLE_MEMORY);
 881 }
 882 
 883 
 884 //------------------------------make_string_method_node------------------------
 885 // Helper method for String intrinsic functions. This version is called with
 886 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
 887 // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
 888 // containing the lengths of str1 and str2.
 889 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
 890   Node* result = NULL;
 891   switch (opcode) {
 892   case Op_StrIndexOf:
 893     result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
 894                                 str1_start, cnt1, str2_start, cnt2, ae);
 895     break;
 896   case Op_StrComp:
 897     result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
 898                              str1_start, cnt1, str2_start, cnt2, ae);
 899     break;
 900   case Op_StrEquals:
 901     // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
 902     // Use the constant length if there is one because optimized match rule may exist.

2877       Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
2878       ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
2879     } __ else_(); {
2880       // void class case
2881       ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1)));
2882     } __ end_if();
2883 
2884     Node* signaled_flag_address = makecon(TypeRawPtr::make(Jfr::signal_address()));
2885     Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
2886     __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
2887       ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
2888     } __ end_if();
2889   } __ end_if();
2890 
2891   final_sync(ideal);
2892   set_result(ideal.value(result));
2893 #undef __
2894   return true;
2895 }
2896 
2897 bool LibraryCallKit::inline_native_getEventWriter() {
2898   Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
2899 
2900   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
2901                                   in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
2902 
2903   Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
2904 
2905   Node* jobj_cmp_null = _gvn.transform( new CmpPNode(jobj, null()) );
2906   Node* test_jobj_eq_null  = _gvn.transform( new BoolNode(jobj_cmp_null, BoolTest::eq) );
2907 
2908   IfNode* iff_jobj_null =
2909     create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
2910 
2911   enum { _normal_path = 1,
2912          _null_path = 2,
2913          PATH_LIMIT };
2914 
2915   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
2916   PhiNode*    result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
2917 
2918   Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
2919   result_rgn->init_req(_null_path, jobj_is_null);
2920   result_val->init_req(_null_path, null());
2921 
2922   Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
2923   set_control(jobj_is_not_null);
2924   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
2925                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
2926   result_rgn->init_req(_normal_path, control());
2927   result_val->init_req(_normal_path, res);
2928 
2929   set_result(result_rgn, result_val);
2930 
2931   return true;
2932 }
2933 
2934 #endif // JFR_HAVE_INTRINSICS
2935 
2936 //------------------------inline_native_currentThread------------------
2937 bool LibraryCallKit::inline_native_currentThread() {
2938   Node* junk = NULL;
2939   set_result(generate_current_thread(junk));
2940   return true;
2941 }
2942 
2943 //---------------------------load_mirror_from_klass----------------------------
2944 // Given a klass oop, load its java mirror (a java.lang.Class oop).
2945 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2946   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
2947   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
2948   // mirror = ((OopHandle)mirror)->resolve();
2949   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
2950 }
2951 
2952 //-----------------------load_klass_from_mirror_common-------------------------
2953 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2954 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2955 // and branch to the given path on the region.
2956 // If never_see_null, take an uncommon trap on null, so we can optimistically
2957 // compile for the non-null case.
2958 // If the region is NULL, force never_see_null = true.
2959 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,

5783 
5784   RegionNode* region = new RegionNode(3);
5785   PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
5786 
5787   Node* if_true = _gvn.transform(new IfTrueNode(if_node));
5788   region->init_req(1, if_true);
5789   phi->init_req(1, intcon(1));
5790 
5791   Node* if_false = _gvn.transform(new IfFalseNode(if_node));
5792   region->init_req(2, if_false);
5793   phi->init_req(2, intcon(0));
5794 
5795   set_control(_gvn.transform(region));
5796   record_for_igvn(region);
5797   set_result(_gvn.transform(phi));
5798   return true;
5799 }
5800 
5801 
5802 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
5803                                              DecoratorSet decorators = IN_HEAP, bool is_static = false,
5804                                              ciInstanceKlass* fromKls = NULL) {
5805   if (fromKls == NULL) {
5806     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5807     assert(tinst != NULL, "obj is null");
5808     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5809     fromKls = tinst->klass()->as_instance_klass();
5810   } else {
5811     assert(is_static, "only for static field access");
5812   }
5813   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5814                                               ciSymbol::make(fieldTypeString),
5815                                               is_static);
5816 
5817   assert (field != NULL, "undefined field");
5818   if (field == NULL) return (Node *) NULL;
5819 
5820   if (is_static) {
5821     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5822     fromObj = makecon(tip);
5823   }
5824 
5825   // Next code  copied from Parse::do_get_xxx():
5826 
5827   // Compute address and memory type.
5828   int offset  = field->offset_in_bytes();
5829   bool is_vol = field->is_volatile();
5830   ciType* field_klass = field->type();
5831   assert(field_klass->is_loaded(), "should be loaded");
5832   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5833   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5834   BasicType bt = field->layout_type();
5835 
5836   // Build the resultant type of the load
5837   const Type *type;
5838   if (bt == T_OBJECT) {
5839     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5840   } else {
5841     type = Type::get_const_basic_type(bt);
5842   }
5843 
5844   if (is_vol) {
5845     decorators |= MO_SEQ_CST;
5846   }
5847 
5848   return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
5849 }
5850 
5851 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5852                                                  bool is_exact = true, bool is_static = false,
5853                                                  ciInstanceKlass * fromKls = NULL) {
5854   if (fromKls == NULL) {
5855     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5856     assert(tinst != NULL, "obj is null");
5857     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5858     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5859     fromKls = tinst->klass()->as_instance_klass();
5860   }
5861   else {
5862     assert(is_static, "only for static field access");
5863   }
5864   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5865     ciSymbol::make(fieldTypeString),
5866     is_static);
5867 
5868   assert(field != NULL, "undefined field");
5869   assert(!field->is_volatile(), "not defined for volatile fields");
5870 
5871   if (is_static) {
5872     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5873     fromObj = makecon(tip);

6937   ciKlass* klass = NULL;
6938   if (klass_name != NULL) {
6939     klass = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_name));
6940   }
6941   if ((klass == NULL) || !klass->is_loaded()) {
6942     // if none of MD5/SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
6943     Node* ctrl = control();
6944     set_control(top()); // no intrinsic path
6945     return ctrl;
6946   }
6947   ciInstanceKlass* instklass = klass->as_instance_klass();
6948 
6949   Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
6950   Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6951   Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6952   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6953 
6954   return instof_false;  // even if it is NULL
6955 }
6956 
6957 //-------------inline_fma-----------------------------------
6958 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
6959   Node *a = NULL;
6960   Node *b = NULL;
6961   Node *c = NULL;
6962   Node* result = NULL;
6963   switch (id) {
6964   case vmIntrinsics::_fmaD:
6965     assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
6966     // no receiver since it is static method
6967     a = round_double_node(argument(0));
6968     b = round_double_node(argument(2));
6969     c = round_double_node(argument(4));
6970     result = _gvn.transform(new FmaDNode(control(), a, b, c));
6971     break;
6972   case vmIntrinsics::_fmaF:
6973     assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
6974     a = argument(0);
6975     b = argument(1);
6976     c = argument(2);

src/hotspot/share/opto/library_call.cpp (new version; the listing above is the old version)

  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"
  49 #include "opto/parse.hpp"
  50 #include "opto/runtime.hpp"
  51 #include "opto/rootnode.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "prims/unsafe.hpp"
  54 #include "runtime/objectMonitor.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "utilities/macros.hpp"
  58 #include "utilities/powerOfTwo.hpp"
  59 
  60 #if INCLUDE_JFR
  61 #include "jfr/jfr.hpp"
  62 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp" // FIXME
  63 #endif
  64 
  65 //---------------------------make_vm_intrinsic----------------------------
  66 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  67   vmIntrinsicID id = m->intrinsic_id();
  68   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  69 
  70   if (!m->is_loaded()) {
  71     // Do not attempt to inline unloaded methods.
  72     return NULL;
  73   }
  74 
  75   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  76   bool is_available = false;
  77 
  78   {
  79     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  80     // the compiler must transition to '_thread_in_vm' state because both
  81     // methods access VM-internal data.
  82     VM_ENTRY_MARK;

 454   case vmIntrinsics::_compareAndExchangeLongAcquire:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
 455   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 456 
 457   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 458   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 459   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 460   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 461 
 462   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 463   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 464   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 465   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 466   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 467 
 468   case vmIntrinsics::_loadFence:
 469   case vmIntrinsics::_storeFence:
 470   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 471 
 472   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 473 
 474   case vmIntrinsics::_currentThread0:           return inline_native_currentThread0();
 475   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 476   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
 477 
 478   case vmIntrinsics::_scopeLocalCache:          return inline_native_scopeLocalCache();
 479   case vmIntrinsics::_setScopeLocalCache:       return inline_native_setScopeLocalCache();
 480 
 481 #ifdef JFR_HAVE_INTRINSICS
 482   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
 483   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 484 #endif
 485   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 486   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 487   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 488   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 489   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 490   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 491   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 492   case vmIntrinsics::_getLength:                return inline_native_getLength();
 493   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 494   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 495   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 496   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 497   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 498   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 499   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 500 
 501   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 502   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);

 612   case vmIntrinsics::_updateDirectByteBufferCRC32C:
 613     return inline_updateDirectByteBufferCRC32C();
 614 
 615   case vmIntrinsics::_updateBytesAdler32:
 616     return inline_updateBytesAdler32();
 617   case vmIntrinsics::_updateByteBufferAdler32:
 618     return inline_updateByteBufferAdler32();
 619 
 620   case vmIntrinsics::_profileBoolean:
 621     return inline_profileBoolean();
 622   case vmIntrinsics::_isCompileConstant:
 623     return inline_isCompileConstant();
 624 
 625   case vmIntrinsics::_hasNegatives:
 626     return inline_hasNegatives();
 627 
 628   case vmIntrinsics::_fmaD:
 629   case vmIntrinsics::_fmaF:
 630     return inline_fma(intrinsic_id());
 631 
 632   case vmIntrinsics::_Continuation_doYield:
 633     return inline_continuation_do_yield();
 634 
 635   case vmIntrinsics::_isDigit:
 636   case vmIntrinsics::_isLowerCase:
 637   case vmIntrinsics::_isUpperCase:
 638   case vmIntrinsics::_isWhitespace:
 639     return inline_character_compare(intrinsic_id());
 640 
 641   case vmIntrinsics::_maxF:
 642   case vmIntrinsics::_minF:
 643   case vmIntrinsics::_maxD:
 644   case vmIntrinsics::_minD:
 645     return inline_fp_min_max(intrinsic_id());
 646 
 647   case vmIntrinsics::_VectorUnaryOp:
 648     return inline_vector_nary_operation(1);
 649   case vmIntrinsics::_VectorBinaryOp:
 650     return inline_vector_nary_operation(2);
 651   case vmIntrinsics::_VectorTernaryOp:
 652     return inline_vector_nary_operation(3);
 653   case vmIntrinsics::_VectorBroadcastCoerced:
 654     return inline_vector_broadcast_coerced();

 859   record_for_igvn(bailout);
 860   if (char_count) {
 861     // Convert char count to byte count
 862     count = _gvn.transform(new LShiftINode(count, intcon(1)));
 863   }
 864 
 865   // Offset and count must not be negative
 866   generate_negative_guard(offset, bailout);
 867   generate_negative_guard(count, bailout);
 868   // Offset + count must not exceed length of array
 869   generate_limit_guard(offset, count, load_array_length(array), bailout);
 870 
 871   if (bailout->req() > 1) {
 872     PreserveJVMState pjvms(this);
 873     set_control(_gvn.transform(bailout));
 874     uncommon_trap(Deoptimization::Reason_intrinsic,
 875                   Deoptimization::Action_maybe_recompile);
 876   }
 877 }
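
The negative and limit guards above share one idiom: test a condition, route the failing control projection into the shared 'bailout' region, and keep compiling along the passing projection. A minimal sketch of that idiom, built only from node types that appear elsewhere on this page (the helper name is illustrative, not from this file):

void LibraryCallKit::add_negative_guard_sketch(Node* value, RegionNode* bailout) {
  Node* cmp = _gvn.transform(new CmpINode(value, intcon(0)));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
  IfNode* iff = create_and_map_if(control(), bol, PROB_MIN, COUNT_UNKNOWN);
  bailout->add_req(_gvn.transform(new IfTrueNode(iff)));  // value < 0: record bailout path
  set_control(_gvn.transform(new IfFalseNode(iff)));      // value >= 0: continue compiling
}

Once all guards are emitted, bailout->req() > 1 (as checked above) means at least one failing path was recorded, and the transformed region becomes the control input of the uncommon trap.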
 878 
 879 Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
 880                                             bool is_immutable) {
 881   ciKlass* thread_klass = env()->Thread_klass();
 882   const Type* thread_type
 883     = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
 884 
 885   Node* thread = _gvn.transform(new ThreadLocalNode());
 886   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(handle_offset));
 887   tls_output = thread;
 888 
 889   Node* thread_obj_handle
 890     = (is_immutable
 891       ? LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
 892         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
 893       : make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
 894   thread_obj_handle = _gvn.transform(thread_obj_handle);
 895 
 896   DecoratorSet decorators = IN_NATIVE;
 897   if (is_immutable) {
 898     decorators |= C2_IMMUTABLE_MEMORY;
 899   }
 900   return access_load(thread_obj_handle, thread_type, T_OBJECT, decorators);
 901 }
 902 
 903 //--------------------------generate_current_thread--------------------
 904 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
 905   return current_thread_helper(tls_output, JavaThread::threadObj_offset(),
 906                                /*is_immutable*/false);
 907 }
 908 
 909 //--------------------------generate_virtual_thread--------------------
 910 Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) {
 911   return current_thread_helper(tls_output, JavaThread::vthread_offset(),
 912                                !C->method()->changes_current_thread());
 913 }
 914 
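
A hedged usage sketch of the two wrappers, mirroring how inline_native_getEventWriter below consumes them: generate_current_thread writes the raw ThreadLocalNode through its out-parameter, while generate_virtual_thread takes the node by value.

Node* tls = NULL;
Node* carrier = generate_current_thread(tls);  // resolves the JavaThread::threadObj handle
Node* vthread = generate_virtual_thread(tls);  // resolves the JavaThread::vthread handle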
 915 //------------------------------make_string_method_node------------------------
 916 // Helper method for String intrinsic functions. This version is called with
 917 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
 918 // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
 919 // containing the lengths of str1 and str2.
 920 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
 921   Node* result = NULL;
 922   switch (opcode) {
 923   case Op_StrIndexOf:
 924     result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
 925                                 str1_start, cnt1, str2_start, cnt2, ae);
 926     break;
 927   case Op_StrComp:
 928     result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
 929                              str1_start, cnt1, str2_start, cnt2, ae);
 930     break;
 931   case Op_StrEquals:
 932     // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
 933     // Use the constant length if there is one because optimized match rule may exist.

2908       Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
2909       ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
2910     } __ else_(); {
2911       // void class case
2912       ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1)));
2913     } __ end_if();
2914 
2915     Node* signaled_flag_address = makecon(TypeRawPtr::make(Jfr::signal_address()));
2916     Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
2917     __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
2918       ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
2919     } __ end_if();
2920   } __ end_if();
2921 
2922   final_sync(ideal);
2923   set_result(ideal.value(result));
2924 #undef __
2925   return true;
2926 }
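
The body above is written against IdealKit's structured control flow, with the usual '#define __ ideal.' shorthand. A hedged skeleton of that idiom, assuming the conventional IdealKit setup rather than the exact declarations earlier in this function:

IdealKit ideal(this, /*delay_all_transforms*/ true);
#define __ ideal.
__ if_then(flag, BoolTest::ne, ideal.ConI(1)); {
  // then-branch: emitted when flag != 1
} __ else_(); {
  // else-branch
} __ end_if();
final_sync(ideal);  // commit IdealKit's control and memory state back to the kit
#undef __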
2927 
2928 /*
2929     jobject h_event_writer = Thread::jfr_thread_local()->java_event_writer();
2930     if (h_event_writer == NULL) {
2931       return NULL;
2932     }
2933     oop threadObj = Thread::threadObj();
2934     oop vthread = java_lang_Thread::vthread(threadObj);
2935     traceid tid;
2936     if (vthread != threadObj) {  // i.e. current thread is virtual
2937       traceid value = java_lang_VirtualThread::tid(vthread);
2938       tid = value & tid_mask;
2939       traceid epoch = value >> epoch_shift;
2940       traceid current_epoch = JfrTraceIdEpoch::current_generation();
2941       if (epoch != current_epoch) {
2942         traceid update_value = current_epoch << epoch_shift;
2943         update_value |= tid;
2944         java_lang_VirtualThread::set_tid(vthread, update_value);
2945         write_checkpoint(tid);
2946       }
2947     } else {
2948       tid = java_lang_Thread::tid(threadObj);
2949     }
2950     oop event_writer = JNIHandles::resolve_non_null(h_event_writer);
2951     traceid tid_in_event_writer = getField(event_writer, "threadID");
2952     if (tid_in_event_writer != tid) {
2953       setField(event_writer, "threadID", tid);
2954     }
2955     return event_writer;
2956 
2957  */
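
The pseudocode packs an epoch generation and a thread id into a single 64-bit field; the node math below (AndLNode, URShiftLNode, LShiftLNode, OrLNode) implements exactly these identities. In plain C++, with jfr_epoch_shift and jfr_id_mask being the constants fed into the node graph below:

uint64_t pack(uint64_t epoch, uint64_t tid) { return (epoch << jfr_epoch_shift) | tid; }
uint64_t tid_of(uint64_t value)             { return value & jfr_id_mask; }
uint64_t epoch_of(uint64_t value)           { return value >> jfr_epoch_shift; }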
2958 bool LibraryCallKit::inline_native_getEventWriter() {
2959   enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
2960 
2961   // save input memory and i_o state
2962   Node* input_memory_state = reset_memory();
2963   set_all_memory(input_memory_state);
2964   Node* input_io_state = i_o();
2965 
2966   // TLS
2967   Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
2968 
2969   // load offset of jfr_thread_local
2970   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
2971 
2972   // Load eventwriter jobject handle from the jfr_thread_local
2973   Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
2974 
2975   // Null check the jobject handle
2976   Node* jobj_cmp_null = _gvn.transform(new CmpPNode(jobj, null()));
2977   Node* test_jobj_ne_null = _gvn.transform(new BoolNode(jobj_cmp_null, BoolTest::ne));
2978   IfNode* iff_jobj_ne_null = create_and_map_if(control(), test_jobj_ne_null, PROB_MAX, COUNT_UNKNOWN);
2979 
2980   // false path, jobj is null
2981   Node* jobj_is_null = _gvn.transform(new IfFalseNode(iff_jobj_ne_null));
2982 
2983   // true path, jobj is not null
2984   Node* jobj_is_not_null = _gvn.transform(new IfTrueNode(iff_jobj_ne_null));
2985 
2986   // load the threadObj for the CarrierThread
2987   Node* const threadObj = generate_current_thread(tls_ptr);
2988 
2989   // load the vthread field
2990   Node* const vthreadObj = generate_virtual_thread(tls_ptr);
2991 
2992   // vthread != threadObj
2993   RegionNode* threadObj_result_rgn = new RegionNode(PATH_LIMIT);
2994   record_for_igvn(threadObj_result_rgn);
2995   PhiNode*    thread_id_mem = new PhiNode(threadObj_result_rgn, Type::MEMORY, TypePtr::BOTTOM);
2996   PhiNode*    thread_id_io = new PhiNode(threadObj_result_rgn, Type::ABIO);
2997   record_for_igvn(thread_id_io);
2998   PhiNode*    thread_id_val = new PhiNode(threadObj_result_rgn, TypeLong::LONG);
2999   record_for_igvn(thread_id_val);
3000 
3001   // If vthread != thread, this is a virtual thread
3002   Node* vthreadObj_cmp_threadObj = _gvn.transform(new CmpPNode(vthreadObj, threadObj));
3003   Node* test_vthreadObj_ne_threadObj = _gvn.transform(new BoolNode(vthreadObj_cmp_threadObj, BoolTest::ne));
3004   IfNode* iff_vthreadObj_ne_threadObj =
3005     create_and_map_if(jobj_is_not_null, test_vthreadObj_ne_threadObj, PROB_FAIR, COUNT_UNKNOWN);
3006 
3007   // false branch, fallback to threadObj
3008   Node* virtual_thread_is_threadObj = _gvn.transform(new IfFalseNode(iff_vthreadObj_ne_threadObj));
3009   Node* thread_obj_tid = load_field_from_object(threadObj, "tid", "J");
3010 
3011   // true branch, this is a virtual thread
3012   Node* virtual_thread_is_not_threadObj = _gvn.transform(new IfTrueNode(iff_vthreadObj_ne_threadObj));
3013   // read the thread id from the vthread
3014   Node* vthread_obj_tid_value = load_field_from_object(vthreadObj, "tid", "J");
3015 
3016   // bit shift and mask
3017   Node* const epoch_shift = _gvn.intcon(jfr_epoch_shift);
3018   Node* const tid_mask = _gvn.longcon(jfr_id_mask);
3019 
3020   // mask off the epoch information from the thread id
3021   Node* const vthread_obj_tid = _gvn.transform(new AndLNode(vthread_obj_tid_value, tid_mask));
3022   // shift thread id value down for last epoch
3023   Node* const vthread_epoch = _gvn.transform(new URShiftLNode(vthread_obj_tid_value, epoch_shift));
3024 
3025   // epoch compare
3026   RegionNode* epoch_compare_rgn = new RegionNode(PATH_LIMIT);
3027   record_for_igvn(epoch_compare_rgn);
3028   PhiNode*    epoch_compare_mem = new PhiNode(epoch_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3029   record_for_igvn(epoch_compare_mem);
3030   PhiNode*    epoch_compare_io = new PhiNode(epoch_compare_rgn, Type::ABIO);
3031   record_for_igvn(epoch_compare_io);
3032 
3033   Node* saved_ctl = control();
3034   set_control(virtual_thread_is_not_threadObj);
3035   TypePtr* const no_memory_effects = NULL;
3036   // make a runtime call to get the current epoch
3037   Node* call_epoch_generation = make_runtime_call(RC_LEAF | RC_NO_FP,
3038                                                   OptoRuntime::void_long_Type(),
3039                                                   (address)JFR_EPOCH_GENERATION_FUNCTION,
3040                                                   "epoch_generation", no_memory_effects);
3041   // restore
3042   set_control(saved_ctl);
3043 
3044   Node* current_epoch_gen_control = _gvn.transform(new ProjNode(call_epoch_generation, TypeFunc::Control));
3045   Node* current_epoch_gen_value = _gvn.transform(new ProjNode(call_epoch_generation, TypeFunc::Parms));
3046 
3047   // compare epoch in vthread to the current epoch generation
3048   Node* const epoch_cmp = _gvn.transform(new CmpLNode(current_epoch_gen_value, vthread_epoch));
3049   Node* test_epoch_ne = _gvn.transform(new BoolNode(epoch_cmp, BoolTest::ne));
3050   IfNode* iff_epoch_ne = create_and_map_if(current_epoch_gen_control, test_epoch_ne, PROB_FAIR, COUNT_UNKNOWN);
3051 
3052   // true path, epochs are not equal, there is a need to write a checkpoint for the vthread
3053   Node* epoch_is_not_equal = _gvn.transform(new IfTrueNode(iff_epoch_ne));
3054   // get the field offset for storing and updated tid and epoch value
3055   Node* const tid_field_address = field_address_from_object(vthreadObj, "tid", "J", false);
3056   const TypePtr* tid_field_address_type = _gvn.type(tid_field_address)->isa_ptr();
3057 
3058   // shift up current epoch generation value
3059   Node* left_shifted_current_epoch_gen = _gvn.transform(new LShiftLNode(current_epoch_gen_value, epoch_shift));
3060   // OR the shifted epoch generation value with the threadid
3061   Node* current_epoch_gen_and_tid = _gvn.transform(new OrLNode(vthread_obj_tid, left_shifted_current_epoch_gen));
3062   // store back the current_epoch_gen_and_tid into the vthreadObject
3063   Node* vthreadObj_epoch_gen_memory_store = store_to_memory(epoch_is_not_equal,
3064                                                             tid_field_address,
3065                                                             current_epoch_gen_and_tid,
3066                                                             T_LONG,
3067                                                             tid_field_address_type,
3068                                                             MemNode::unordered);
3069 
3070   // call out to the VM in order to write a checkpoint for the vthread
3071   saved_ctl = control();
3072   set_control(epoch_is_not_equal);
3073   // call can safepoint
3074   Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
3075                                                   OptoRuntime::jfr_write_checkpoint_Type(),
3076                                                   StubRoutines::jfr_write_checkpoint(),
3077                                                   "write_checkpoint", TypePtr::BOTTOM, vthread_obj_tid, top());
3078   // restore
3079   set_control(saved_ctl);
3080   Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));
3081 
3082   // false path, epochs are the same, no need to write new checkpoint information
3083   Node* epoch_is_equal = _gvn.transform(new IfFalseNode(iff_epoch_ne));
3084 
3085   // need memory and IO
3086   epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control);
3087   epoch_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
3088   epoch_compare_io->init_req(_true_path, i_o());
3089   epoch_compare_rgn->init_req(_false_path, epoch_is_equal);
3090   epoch_compare_mem->init_req(_false_path, input_memory_state);
3091   epoch_compare_io->init_req(_false_path, input_io_state);
3092 
3093   // merge the threadObj branch
3094   threadObj_result_rgn->init_req(_true_path, _gvn.transform(epoch_compare_rgn));
3095   threadObj_result_rgn->init_req(_false_path, virtual_thread_is_threadObj);
3096   thread_id_mem->init_req(_true_path, _gvn.transform(epoch_compare_mem));
3097   thread_id_mem->init_req(_false_path, input_memory_state);
3098   thread_id_io->init_req(_true_path, _gvn.transform(epoch_compare_io));
3099   thread_id_io->init_req(_false_path, input_io_state);
3100   thread_id_val->init_req(_true_path, _gvn.transform(vthread_obj_tid));
3101   thread_id_val->init_req(_false_path, _gvn.transform(thread_obj_tid));
3102 
3103   // update memory and io state
3104   set_all_memory(_gvn.transform(thread_id_mem));
3105   set_i_o(_gvn.transform(thread_id_io));
3106 
3107   // load the event writer oop by dereferencing the jobject handle
3108   saved_ctl = control();
3109   set_control(_gvn.transform(threadObj_result_rgn));
3110   ciKlass* klass_EventWriter = env()->find_system_klass(ciSymbol::make("jdk/jfr/internal/EventWriter"));
3111   assert(klass_EventWriter->is_loaded(), "invariant");
3112   ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass();
3113   const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter);
3114   const TypeOopPtr* const xtype = aklass->as_instance_type();
3115   Node* event_writer = access_load(jobj, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3116   // restore
3117   set_control(saved_ctl);
3118 
3119   // load the current thread id from the event writer object
3120   Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
3121   // get the field offset to store an updated tid value later (conditionally)
3122   Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
3123   const TypePtr* event_writer_tid_field_type = _gvn.type(event_writer_tid_field)->isa_ptr();
3124 
3125   // thread id compare
3126   RegionNode* tid_compare_rgn = new RegionNode(PATH_LIMIT);
3127   record_for_igvn(tid_compare_rgn);
3128   PhiNode*    tid_compare_mem = new PhiNode(tid_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3129   record_for_igvn(tid_compare_mem);
3130   PhiNode*    tid_compare_io = new PhiNode(tid_compare_rgn, Type::ABIO);
3131   record_for_igvn(tid_compare_io);
3132 
3133   // compare current tid to what is stored in the event writer object
3134   Node* const tid_cmp = _gvn.transform(new CmpLNode(event_writer_tid, _gvn.transform(thread_id_val)));
3135   Node* test_tid_ne = _gvn.transform(new BoolNode(tid_cmp, BoolTest::ne));
3136   IfNode* iff_tid_ne = create_and_map_if(_gvn.transform(threadObj_result_rgn), test_tid_ne, PROB_FAIR, COUNT_UNKNOWN);
3137 
3138   // true path, tid not equal, need to store tid value to the event writer
3139   Node* tid_is_not_equal = _gvn.transform(new IfTrueNode(iff_tid_ne));
3140   record_for_igvn(tid_is_not_equal);
3141   // update the event writer with the current thread id value
3142   Node* event_writer_tid_memory_store = store_to_memory(tid_is_not_equal,
3143                                                         event_writer_tid_field,
3144                                                         thread_id_val,
3145                                                         T_LONG,
3146                                                         event_writer_tid_field_type,
3147                                                         MemNode::unordered);
3148 
3149   // false path, tids are the same, no update
3150   Node* tid_is_equal = _gvn.transform(new IfFalseNode(iff_tid_ne));
3151 
3152   // update controls
3153   tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
3154   tid_compare_rgn->init_req(_false_path, tid_is_equal);
3155 
3156   // update memory phi node
3157   tid_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
3158   tid_compare_mem->init_req(_false_path, _gvn.transform(thread_id_mem));
3159 
3160   // update io phi node
3161   tid_compare_io->init_req(_true_path, _gvn.transform(i_o()));
3162   tid_compare_io->init_req(_false_path, _gvn.transform(thread_id_io));
3163 
3164   // result of top level CFG, Memory, IO and Value
3165   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3166   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3167   PhiNode*    result_io = new PhiNode(result_reg, Type::ABIO);
3168   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::BOTTOM);
3169 
3170   // result control
3171   result_reg->init_req(_true_path, _gvn.transform(tid_compare_rgn));
3172   result_reg->init_req(_false_path, jobj_is_null);
3173 
3174   // result memory
3175   result_mem->init_req(_true_path, _gvn.transform(tid_compare_mem));
3176   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3177 
3178   // result io
3179   result_io->init_req(_true_path, _gvn.transform(tid_compare_io));
3180   result_io->init_req(_false_path, _gvn.transform(input_io_state));
3181 
3182   // result values
3183   result_val->init_req(_true_path, _gvn.transform(event_writer)); // return event writer oop
3184   result_val->init_req(_false_path, null()); // return NULL
3185 
3186   // set output state
3187   set_all_memory(_gvn.transform(result_mem));
3188   set_i_o(_gvn.transform(result_io));
3189   set_result(result_reg, result_val);
3190   return true;
3191 }
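
The function above repeats one merge idiom three times (the epoch compare, the threadObj merge, and the tid compare): a RegionNode merges control, with parallel PhiNodes for memory, i_o and the value. A minimal two-way sketch of that idiom; every name here is illustrative:

enum { _a_path = 1, _b_path = 2, PATH_LIMIT };
RegionNode* rgn = new RegionNode(PATH_LIMIT);
PhiNode*    mem = new PhiNode(rgn, Type::MEMORY, TypePtr::BOTTOM);
PhiNode*    io  = new PhiNode(rgn, Type::ABIO);
rgn->init_req(_a_path, ctrl_a);   rgn->init_req(_b_path, ctrl_b);
mem->init_req(_a_path, mem_a);    mem->init_req(_b_path, mem_b);
io->init_req(_a_path, io_a);      io->init_req(_b_path, io_b);
set_control(_gvn.transform(rgn));     // merged control
set_all_memory(_gvn.transform(mem));  // merged memory state
set_i_o(_gvn.transform(io));          // merged i/o state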
3192 
3193 #endif // JFR_HAVE_INTRINSICS
3194 
3195 //------------------------inline_native_currentThread0------------------
3196 bool LibraryCallKit::inline_native_currentThread0() {
3197   Node* junk = NULL;
3198   set_result(generate_current_thread(junk));
3199   return true;
3200 }
3201 
3202 Node* LibraryCallKit::scopeLocalCache_helper() {
3203   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3204   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3205 
3206   bool xk = etype->klass_is_exact();
3207 
3208   Node* thread = _gvn.transform(new ThreadLocalNode());
3209   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopeLocalCache_offset()));
3210   return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
3211         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3212 }
3213 
3214 //------------------------inline_native_scopeLocalCache------------------
3215 bool LibraryCallKit::inline_native_scopeLocalCache() {
3216   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3217   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3218   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3219 
3220   // Because we create the scopeLocal cache lazily we have to make the
3221   // type of the result BotPTR.
3222   bool xk = etype->klass_is_exact();
3223   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3224   Node* cache_obj_handle = scopeLocalCache_helper();
3225   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3226 
3227   return true;
3228 }
3229 
3230 //------------------------inline_native_setScopeLocalCache------------------
3231 bool LibraryCallKit::inline_native_setScopeLocalCache() {
3232   Node* arr = argument(0);
3233   Node* cache_obj_handle = scopeLocalCache_helper();
3234 
3235   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3236   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
3237                   MemNode::unordered);
3238 
3239   return true;
3240 }
3241 
3242 //------------------------inline_native_currentThread------------------
3243 bool LibraryCallKit::inline_native_currentThread() {
3244   Node* junk = NULL;
3245   set_result(generate_virtual_thread(junk));
3246   return true;
3247 }
3248 
3249 //------------------------inline_native_setVthread------------------
3250 bool LibraryCallKit::inline_native_setCurrentThread() {
3251   assert(C->method()->changes_current_thread(),
3252          "method changes current Thread but is not annotated ChangesCurrentThread");
3253   Node* arr = argument(1);
3254   Node* thread = _gvn.transform(new ThreadLocalNode());
3255   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3256   Node* thread_obj_handle
3257     = make_load(NULL, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3258   thread_obj_handle = _gvn.transform(thread_obj_handle);
3259   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3260   // Stores of oops to native memory not supported yet by BarrierSetC2::store_at_resolved
3261   // access_store_at(NULL, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3262   store_to_memory(control(), thread_obj_handle, arr, T_OBJECT, adr_type, MemNode::unordered);
3263 
3264   return true;
3265 }
3266 
3267 //---------------------------load_mirror_from_klass----------------------------
3268 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3269 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3270   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3271   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3272   // mirror = ((OopHandle)mirror)->resolve();
3273   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3274 }
3275 
3276 //-----------------------load_klass_from_mirror_common-------------------------
3277 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3278 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3279 // and branch to the given path on the region.
3280 // If never_see_null, take an uncommon trap on null, so we can optimistically
3281 // compile for the non-null case.
3282 // If the region is NULL, force never_see_null = true.
3283 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,

6107 
6108   RegionNode* region = new RegionNode(3);
6109   PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
6110 
6111   Node* if_true = _gvn.transform(new IfTrueNode(if_node));
6112   region->init_req(1, if_true);
6113   phi->init_req(1, intcon(1));
6114 
6115   Node* if_false = _gvn.transform(new IfFalseNode(if_node));
6116   region->init_req(2, if_false);
6117   phi->init_req(2, intcon(0));
6118 
6119   set_control(_gvn.transform(region));
6120   record_for_igvn(region);
6121   set_result(_gvn.transform(phi));
6122   return true;
6123 }
6124 
6125 
6126 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
6127                                              DecoratorSet decorators, bool is_static,
6128                                              ciInstanceKlass* fromKls) {
6129   if (fromKls == NULL) {
6130     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6131     assert(tinst != NULL, "obj is null");
6132     assert(tinst->klass()->is_loaded(), "obj is not loaded");
6133     fromKls = tinst->klass()->as_instance_klass();
6134   } else {
6135     assert(is_static, "only for static field access");
6136   }
6137   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6138                                               ciSymbol::make(fieldTypeString),
6139                                               is_static);
6140 
6141   assert (field != NULL, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName);
6142   if (field == NULL) return (Node *) NULL;
6143 
6144   if (is_static) {
6145     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
6146     fromObj = makecon(tip);
6147   }
6148 
6149   // Next code  copied from Parse::do_get_xxx():
6150 
6151   // Compute address and memory type.
6152   int offset  = field->offset_in_bytes();
6153   bool is_vol = field->is_volatile();
6154   ciType* field_klass = field->type();
6155   assert(field_klass->is_loaded(), "should be loaded");
6156   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6157   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6158   BasicType bt = field->layout_type();
6159 
6160   // Build the resultant type of the load
6161   const Type *type;
6162   if (bt == T_OBJECT) {
6163     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6164   } else {
6165     type = Type::get_const_basic_type(bt);
6166   }
6167 
6168   if (is_vol) {
6169     decorators |= MO_SEQ_CST;
6170   }
6171 
6172   return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
6173 }
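
A hedged usage sketch: the JFR intrinsic above loads java.lang.Thread.tid this way. The field is named by (name, signature) strings, so "J" selects a field of type long:

Node* tid = load_field_from_object(threadObj, "tid", "J");
// Per the body above, volatile fields get MO_SEQ_CST OR-ed into the
// decorators; non-volatile fields are loaded with the decorators passed in.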
6174 
6175 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6176                                                  bool is_exact, bool is_static,
6177                                                  ciInstanceKlass * fromKls) {
6178   if (fromKls == NULL) {
6179     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6180     assert(tinst != NULL, "obj is null");
6181     assert(tinst->klass()->is_loaded(), "obj is not loaded");
6182     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6183     fromKls = tinst->klass()->as_instance_klass();
6184   }
6185   else {
6186     assert(is_static, "only for static field access");
6187   }
6188   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6189     ciSymbol::make(fieldTypeString),
6190     is_static);
6191 
6192   assert(field != NULL, "undefined field");
6193   assert(!field->is_volatile(), "not defined for volatile fields");
6194 
6195   if (is_static) {
6196     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
6197     fromObj = makecon(tip);

7261   ciKlass* klass = NULL;
7262   if (klass_name != NULL) {
7263     klass = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_name));
7264   }
7265   if ((klass == NULL) || !klass->is_loaded()) {
7266     // if none of MD5/SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
7267     Node* ctrl = control();
7268     set_control(top()); // no intrinsic path
7269     return ctrl;
7270   }
7271   ciInstanceKlass* instklass = klass->as_instance_klass();
7272 
7273   Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
7274   Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7275   Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7276   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
7277 
7278   return instof_false;  // even if it is NULL
7279 }
7280 
7281 bool LibraryCallKit::inline_continuation_do_yield() {
7282   address call_addr = StubRoutines::cont_doYield();
7283   const TypeFunc* tf = OptoRuntime::continuation_doYield_Type();
7284   Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "doYield", TypeRawPtr::BOTTOM);
7285   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7286   set_result(result);
7287   return true;
7288 }
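
The same runtime-call idiom appears here and twice in inline_native_getEventWriter: emit the call, then project out the pieces the graph needs. A hedged skeleton using only names that appear on this page:

Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "name", TypeRawPtr::BOTTOM);
Node* ctrl = _gvn.transform(new ProjNode(call, TypeFunc::Control));  // control after the call
Node* ret  = _gvn.transform(new ProjNode(call, TypeFunc::Parms));    // returned value, if any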
7289 
7290 //-------------inline_fma-----------------------------------
7291 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
7292   Node *a = NULL;
7293   Node *b = NULL;
7294   Node *c = NULL;
7295   Node* result = NULL;
7296   switch (id) {
7297   case vmIntrinsics::_fmaD:
7298     assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
7299     // no receiver since it is static method
7300     a = round_double_node(argument(0));
7301     b = round_double_node(argument(2));
7302     c = round_double_node(argument(4));
7303     result = _gvn.transform(new FmaDNode(control(), a, b, c));
7304     break;
7305   case vmIntrinsics::_fmaF:
7306     assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
7307     a = argument(0);
7308     b = argument(1);
7309     c = argument(2);