src/hotspot/share/opto/library_call.cpp

  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"
  49 #include "opto/parse.hpp"
  50 #include "opto/runtime.hpp"
  51 #include "opto/rootnode.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "prims/unsafe.hpp"
  54 #include "runtime/objectMonitor.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "utilities/macros.hpp"
  58 #include "utilities/powerOfTwo.hpp"
  59 
  60 #if INCLUDE_JFR
  61 #include "jfr/jfr.hpp"

  62 #endif
  63 
  64 //---------------------------make_vm_intrinsic----------------------------
  65 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  66   vmIntrinsicID id = m->intrinsic_id();
  67   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  68 
  69   if (!m->is_loaded()) {
  70     // Do not attempt to inline unloaded methods.
  71     return NULL;
  72   }
  73 
  74   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  75   bool is_available = false;
  76 
  77   {
  78     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  79     // the compiler must transition to '_thread_in_vm' state because both
  80     // methods access VM-internal data.
  81     VM_ENTRY_MARK;

 453   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 454 
 455   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 456   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 457   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 458   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 459 
 460   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 461   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 462   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 463   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 464   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 465 
 466   case vmIntrinsics::_loadFence:
 467   case vmIntrinsics::_storeFence:
 468   case vmIntrinsics::_storeStoreFence:
 469   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 470 
 471   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 472
 473   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 474
 475 #ifdef JFR_HAVE_INTRINSICS
 476   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
 477   case vmIntrinsics::_getClassId:               return inline_native_classID();
 478   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 479 #endif
 480   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 481   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 482   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 483   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 484   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 485   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 486   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 487   case vmIntrinsics::_getLength:                return inline_native_getLength();
 488   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 489   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 490   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 491   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 492   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 493   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 494   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 495 
 496   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 497   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);

 607   case vmIntrinsics::_updateDirectByteBufferCRC32C:
 608     return inline_updateDirectByteBufferCRC32C();
 609 
 610   case vmIntrinsics::_updateBytesAdler32:
 611     return inline_updateBytesAdler32();
 612   case vmIntrinsics::_updateByteBufferAdler32:
 613     return inline_updateByteBufferAdler32();
 614 
 615   case vmIntrinsics::_profileBoolean:
 616     return inline_profileBoolean();
 617   case vmIntrinsics::_isCompileConstant:
 618     return inline_isCompileConstant();
 619 
 620   case vmIntrinsics::_hasNegatives:
 621     return inline_hasNegatives();
 622 
 623   case vmIntrinsics::_fmaD:
 624   case vmIntrinsics::_fmaF:
 625     return inline_fma(intrinsic_id());
 626
 627   case vmIntrinsics::_isDigit:
 628   case vmIntrinsics::_isLowerCase:
 629   case vmIntrinsics::_isUpperCase:
 630   case vmIntrinsics::_isWhitespace:
 631     return inline_character_compare(intrinsic_id());
 632 
 633   case vmIntrinsics::_min:
 634   case vmIntrinsics::_max:
 635   case vmIntrinsics::_min_strict:
 636   case vmIntrinsics::_max_strict:
 637     return inline_min_max(intrinsic_id());
 638 
 639   case vmIntrinsics::_maxF:
 640   case vmIntrinsics::_minF:
 641   case vmIntrinsics::_maxD:
 642   case vmIntrinsics::_minD:
 643   case vmIntrinsics::_maxF_strict:
 644   case vmIntrinsics::_minF_strict:
 645   case vmIntrinsics::_maxD_strict:
 646   case vmIntrinsics::_minD_strict:

 865   record_for_igvn(bailout);
 866   if (char_count) {
 867     // Convert char count to byte count
 868     count = _gvn.transform(new LShiftINode(count, intcon(1)));
 869   }
 870 
 871   // Offset and count must not be negative
 872   generate_negative_guard(offset, bailout);
 873   generate_negative_guard(count, bailout);
 874   // Offset + count must not exceed length of array
 875   generate_limit_guard(offset, count, load_array_length(array), bailout);
 876 
 877   if (bailout->req() > 1) {
 878     PreserveJVMState pjvms(this);
 879     set_control(_gvn.transform(bailout));
 880     uncommon_trap(Deoptimization::Reason_intrinsic,
 881                   Deoptimization::Action_maybe_recompile);
 882   }
 883 }
 884 
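// In outline, the guard-and-bailout pattern above (a sketch, not the emitted
// IR): each generate_*_guard wires a failing test into 'bailout', and if any
// edge was added (bailout->req() > 1) that merged path deoptimizes:
//
//   if (offset < 0 || count < 0 || offset + count > array.length)
//     uncommon_trap(Reason_intrinsic, Action_maybe_recompile);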
 885 //--------------------------generate_current_thread--------------------
 886 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
 887   ciKlass*    thread_klass = env()->Thread_klass();
 888   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
 889   Node* thread = _gvn.transform(new ThreadLocalNode());
 890   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
 891   tls_output = thread;
 892   Node* thread_obj_handle = LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
 893   thread_obj_handle = _gvn.transform(thread_obj_handle);
 894   return access_load(thread_obj_handle, thread_type, T_OBJECT, IN_NATIVE | C2_IMMUTABLE_MEMORY);
 895 }
 896
 897
 898 //------------------------------make_string_method_node------------------------
 899 // Helper method for String intrinsic functions. This version is called with
 900 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
 901 // characters (depending on the encoding 'ae'). cnt1 and cnt2 are Int nodes
 902 // containing the lengths of str1 and str2.
 903 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
 904   Node* result = NULL;
 905   switch (opcode) {
 906   case Op_StrIndexOf:
 907     result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
 908                                 str1_start, cnt1, str2_start, cnt2, ae);
 909     break;
 910   case Op_StrComp:
 911     result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
 912                              str1_start, cnt1, str2_start, cnt2, ae);
 913     break;
 914   case Op_StrEquals:
 915     // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
 916     // Use the constant length if there is one because optimized match rule may exist.

2903       Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
2904       ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
2905     } __ else_(); {
2906       // void class case
2907       ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1)));
2908     } __ end_if();
2909 
2910     Node* signaled_flag_address = makecon(TypeRawPtr::make(Jfr::signal_address()));
2911     Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
2912     __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
2913       ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
2914     } __ end_if();
2915   } __ end_if();
2916 
2917   final_sync(ideal);
2918   set_result(ideal.value(result));
2919 #undef __
2920   return true;
2921 }
2922
2923 bool LibraryCallKit::inline_native_getEventWriter() {
2924   Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
2925 
2926   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
2927                                   in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
2928
2929   Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
2930 
2931   Node* jobj_cmp_null = _gvn.transform( new CmpPNode(jobj, null()) );
2932   Node* test_jobj_eq_null  = _gvn.transform( new BoolNode(jobj_cmp_null, BoolTest::eq) );
2933
2934   IfNode* iff_jobj_null =
2935     create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
2936
2937   enum { _normal_path = 1,
2938          _null_path = 2,
2939          PATH_LIMIT };
2940 
2941   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
2942   PhiNode*    result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
2943
2944   Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
2945   result_rgn->init_req(_null_path, jobj_is_null);
2946   result_val->init_req(_null_path, null());
2947 
2948   Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
2949   set_control(jobj_is_not_null);
2950   Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
2951                           IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
2952   result_rgn->init_req(_normal_path, control());
2953   result_val->init_req(_normal_path, res);
2954 
2955   set_result(result_rgn, result_val);
2956
2957   return true;
2958 }
2959 
2960 #endif // JFR_HAVE_INTRINSICS
2961
2962 //------------------------inline_native_currentThread------------------
2963 bool LibraryCallKit::inline_native_currentThread() {
2964   Node* junk = NULL;
2965   set_result(generate_current_thread(junk));
2966   return true;
2967 }
2968 
2969 //---------------------------load_mirror_from_klass----------------------------
2970 // Given a klass oop, load its java mirror (a java.lang.Class oop).
2971 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2972   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
2973   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
2974   // mirror = ((OopHandle)mirror)->resolve();
2975   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
2976 }
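// The two-step load above follows the usual OopHandle resolution shape
// (a sketch in C-like pseudocode, not the IR itself):
//
//   oop** handle = klass->java_mirror_handle();  // raw T_ADDRESS make_load
//   oop   mirror = *handle;                      // access_load, IN_NATIVE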
2977 
2978 //-----------------------load_klass_from_mirror_common-------------------------
2979 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2980 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2981 // and branch to the given path on the region.
2982 // If never_see_null, take an uncommon trap on null, so we can optimistically
2983 // compile for the non-null case.
2984 // If the region is NULL, force never_see_null = true.
2985 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,

5809 
5810   RegionNode* region = new RegionNode(3);
5811   PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
5812 
5813   Node* if_true = _gvn.transform(new IfTrueNode(if_node));
5814   region->init_req(1, if_true);
5815   phi->init_req(1, intcon(1));
5816 
5817   Node* if_false = _gvn.transform(new IfFalseNode(if_node));
5818   region->init_req(2, if_false);
5819   phi->init_req(2, intcon(0));
5820 
5821   set_control(_gvn.transform(region));
5822   record_for_igvn(region);
5823   set_result(_gvn.transform(phi));
5824   return true;
5825 }
5826 
5827 
5828 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
5829                                              DecoratorSet decorators = IN_HEAP, bool is_static = false,
5830                                              ciInstanceKlass* fromKls = NULL) {
5831   if (fromKls == NULL) {
5832     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5833     assert(tinst != NULL, "obj is null");
5834     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5835     fromKls = tinst->klass()->as_instance_klass();
5836   } else {
5837     assert(is_static, "only for static field access");
5838   }
5839   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5840                                               ciSymbol::make(fieldTypeString),
5841                                               is_static);
5842 
5843   assert (field != NULL, "undefined field");
5844   if (field == NULL) return (Node *) NULL;
5845 
5846   if (is_static) {
5847     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5848     fromObj = makecon(tip);
5849   }
5850 
5851   // Next code copied from Parse::do_get_xxx():
5852 
5853   // Compute address and memory type.
5854   int offset  = field->offset_in_bytes();
5855   bool is_vol = field->is_volatile();
5856   ciType* field_klass = field->type();
5857   assert(field_klass->is_loaded(), "should be loaded");
5858   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5859   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5860   BasicType bt = field->layout_type();
5861 
5862   // Build the resultant type of the load
5863   const Type *type;
5864   if (bt == T_OBJECT) {
5865     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5866   } else {
5867     type = Type::get_const_basic_type(bt);
5868   }
5869 
5870   if (is_vol) {
5871     decorators |= MO_SEQ_CST;
5872   }
5873 
5874   return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
5875 }
5876 
5877 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5878                                                  bool is_exact = true, bool is_static = false,
5879                                                  ciInstanceKlass * fromKls = NULL) {
5880   if (fromKls == NULL) {
5881     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5882     assert(tinst != NULL, "obj is null");
5883     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5884     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5885     fromKls = tinst->klass()->as_instance_klass();
5886   }
5887   else {
5888     assert(is_static, "only for static field access");
5889   }
5890   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5891     ciSymbol::make(fieldTypeString),
5892     is_static);
5893 
5894   assert(field != NULL, "undefined field");
5895   assert(!field->is_volatile(), "not defined for volatile fields");
5896 
5897   if (is_static) {
5898     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5899     fromObj = makecon(tip);

6947   ciKlass* klass = NULL;
6948   if (klass_name != NULL) {
6949     klass = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_name));
6950   }
6951   if ((klass == NULL) || !klass->is_loaded()) {
6952     // if none of MD5/SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
6953     Node* ctrl = control();
6954     set_control(top()); // no intrinsic path
6955     return ctrl;
6956   }
6957   ciInstanceKlass* instklass = klass->as_instance_klass();
6958 
6959   Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
6960   Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6961   Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6962   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6963 
6964   return instof_false;  // even if it is NULL
6965 }
6966
6967 //-------------inline_fma-----------------------------------
6968 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
6969   Node *a = NULL;
6970   Node *b = NULL;
6971   Node *c = NULL;
6972   Node* result = NULL;
6973   switch (id) {
6974   case vmIntrinsics::_fmaD:
6975     assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
6976     // no receiver since it is a static method
6977     a = round_double_node(argument(0));
6978     b = round_double_node(argument(2));
6979     c = round_double_node(argument(4));
6980     result = _gvn.transform(new FmaDNode(control(), a, b, c));
6981     break;
6982   case vmIntrinsics::_fmaF:
6983     assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
6984     a = argument(0);
6985     b = argument(1);
6986     c = argument(2);

  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/opaquenode.hpp"
  49 #include "opto/parse.hpp"
  50 #include "opto/runtime.hpp"
  51 #include "opto/rootnode.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "prims/unsafe.hpp"
  54 #include "runtime/objectMonitor.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "utilities/macros.hpp"
  58 #include "utilities/powerOfTwo.hpp"
  59 
  60 #if INCLUDE_JFR
  61 #include "jfr/jfr.hpp"
  62 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp" // FIXME
  63 #endif
  64 
  65 //---------------------------make_vm_intrinsic----------------------------
  66 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  67   vmIntrinsicID id = m->intrinsic_id();
  68   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  69 
  70   if (!m->is_loaded()) {
  71     // Do not attempt to inline unloaded methods.
  72     return NULL;
  73   }
  74 
  75   C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  76   bool is_available = false;
  77 
  78   {
  79     // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
  80     // the compiler must transition to '_thread_in_vm' state because both
  81     // methods access VM-internal data.
  82     VM_ENTRY_MARK;

 454   case vmIntrinsics::_compareAndExchangeLongRelease:       return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
 455 
 456   case vmIntrinsics::_getAndAddByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_add,       Volatile);
 457   case vmIntrinsics::_getAndAddShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_add,       Volatile);
 458   case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
 459   case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
 460 
 461   case vmIntrinsics::_getAndSetByte:                    return inline_unsafe_load_store(T_BYTE,   LS_get_set,       Volatile);
 462   case vmIntrinsics::_getAndSetShort:                   return inline_unsafe_load_store(T_SHORT,  LS_get_set,       Volatile);
 463   case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
 464   case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
 465   case vmIntrinsics::_getAndSetReference:               return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 466 
 467   case vmIntrinsics::_loadFence:
 468   case vmIntrinsics::_storeFence:
 469   case vmIntrinsics::_storeStoreFence:
 470   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 471 
 472   case vmIntrinsics::_onSpinWait:               return inline_onspinwait();
 473 
 474   case vmIntrinsics::_currentThread0:           return inline_native_currentThread0();
 475   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 476   case vmIntrinsics::_setCurrentThread:         return inline_native_setCurrentThread();
 477 
 478   case vmIntrinsics::_scopeLocalCache:          return inline_native_scopeLocalCache();
 479   case vmIntrinsics::_setScopeLocalCache:       return inline_native_setScopeLocalCache();
 480 
 481 #ifdef JFR_HAVE_INTRINSICS
 482   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
 483   case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
 484 #endif
 485   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 486   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 487   case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
 488   case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
 489   case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
 490   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 491   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 492   case vmIntrinsics::_getLength:                return inline_native_getLength();
 493   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 494   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 495   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 496   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 497   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 498   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 499   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 500 
 501   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 502   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);

 612   case vmIntrinsics::_updateDirectByteBufferCRC32C:
 613     return inline_updateDirectByteBufferCRC32C();
 614 
 615   case vmIntrinsics::_updateBytesAdler32:
 616     return inline_updateBytesAdler32();
 617   case vmIntrinsics::_updateByteBufferAdler32:
 618     return inline_updateByteBufferAdler32();
 619 
 620   case vmIntrinsics::_profileBoolean:
 621     return inline_profileBoolean();
 622   case vmIntrinsics::_isCompileConstant:
 623     return inline_isCompileConstant();
 624 
 625   case vmIntrinsics::_hasNegatives:
 626     return inline_hasNegatives();
 627 
 628   case vmIntrinsics::_fmaD:
 629   case vmIntrinsics::_fmaF:
 630     return inline_fma(intrinsic_id());
 631 
 632   case vmIntrinsics::_Continuation_doYield:
 633     return inline_continuation_do_yield();
 634 
 635   case vmIntrinsics::_isDigit:
 636   case vmIntrinsics::_isLowerCase:
 637   case vmIntrinsics::_isUpperCase:
 638   case vmIntrinsics::_isWhitespace:
 639     return inline_character_compare(intrinsic_id());
 640 
 641   case vmIntrinsics::_min:
 642   case vmIntrinsics::_max:
 643   case vmIntrinsics::_min_strict:
 644   case vmIntrinsics::_max_strict:
 645     return inline_min_max(intrinsic_id());
 646 
 647   case vmIntrinsics::_maxF:
 648   case vmIntrinsics::_minF:
 649   case vmIntrinsics::_maxD:
 650   case vmIntrinsics::_minD:
 651   case vmIntrinsics::_maxF_strict:
 652   case vmIntrinsics::_minF_strict:
 653   case vmIntrinsics::_maxD_strict:
 654   case vmIntrinsics::_minD_strict:

 873   record_for_igvn(bailout);
 874   if (char_count) {
 875     // Convert char count to byte count
 876     count = _gvn.transform(new LShiftINode(count, intcon(1)));
 877   }
 878 
 879   // Offset and count must not be negative
 880   generate_negative_guard(offset, bailout);
 881   generate_negative_guard(count, bailout);
 882   // Offset + count must not exceed length of array
 883   generate_limit_guard(offset, count, load_array_length(array), bailout);
 884 
 885   if (bailout->req() > 1) {
 886     PreserveJVMState pjvms(this);
 887     set_control(_gvn.transform(bailout));
 888     uncommon_trap(Deoptimization::Reason_intrinsic,
 889                   Deoptimization::Action_maybe_recompile);
 890   }
 891 }
 892 
 893 Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
 894                                             bool is_immutable) {
 895   ciKlass* thread_klass = env()->Thread_klass();
 896   const Type* thread_type
 897     = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
 898 
 899   Node* thread = _gvn.transform(new ThreadLocalNode());
 900   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(handle_offset));
 901   tls_output = thread;
 902 
 903   Node* thread_obj_handle
 904     = (is_immutable
 905       ? LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
 906         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
 907       : make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
 908   thread_obj_handle = _gvn.transform(thread_obj_handle);
 909 
 910   DecoratorSet decorators = IN_NATIVE;
 911   if (is_immutable) {
 912     decorators |= C2_IMMUTABLE_MEMORY;
 913   }
 914   return access_load(thread_obj_handle, thread_type, T_OBJECT, decorators);
 915 }
 916 
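// Roughly, the helper above produces one of two load shapes for the handle
// (C-like pseudocode; the names here are illustrative only):
//
//   oop** p = (oop**)(thread_base + handle_offset);
//   oop*  h = is_immutable ? immutable_raw_load(p)  // LoadNode::make, hoistable
//                          : raw_load(p);           // make_load, tracks memory state
//   return load_oop(h, IN_NATIVE | (is_immutable ? C2_IMMUTABLE_MEMORY : 0));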
 917 //--------------------------generate_current_thread--------------------
 918 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
 919   return current_thread_helper(tls_output, JavaThread::threadObj_offset(),
 920                                /*is_immutable*/false);
 921 }
 922 
 923 //--------------------------generate_virtual_thread--------------------
 924 Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) {
 925   return current_thread_helper(tls_output, JavaThread::vthread_offset(),
 926                                !C->method()->changes_current_thread());
 927 }
 928 
 929 //------------------------------make_string_method_node------------------------
 930 // Helper method for String intrinsic functions. This version is called with
 931 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
 932 // characters (depending on the encoding 'ae'). cnt1 and cnt2 are Int nodes
 933 // containing the lengths of str1 and str2.
 934 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
 935   Node* result = NULL;
 936   switch (opcode) {
 937   case Op_StrIndexOf:
 938     result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
 939                                 str1_start, cnt1, str2_start, cnt2, ae);
 940     break;
 941   case Op_StrComp:
 942     result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
 943                              str1_start, cnt1, str2_start, cnt2, ae);
 944     break;
 945   case Op_StrEquals:
 946     // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
 947     // Use the constant length if there is one because optimized match rule may exist.

2934       Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
2935       ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
2936     } __ else_(); {
2937       // void class case
2938       ideal.set(result, _gvn.transform(longcon(LAST_TYPE_ID + 1)));
2939     } __ end_if();
2940 
2941     Node* signaled_flag_address = makecon(TypeRawPtr::make(Jfr::signal_address()));
2942     Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
2943     __ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
2944       ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
2945     } __ end_if();
2946   } __ end_if();
2947 
2948   final_sync(ideal);
2949   set_result(ideal.value(result));
2950 #undef __
2951   return true;
2952 }
2953 
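// For reference, the visible tail of inline_native_classID reduces to this
// sketch (TRACE_ID_SHIFT and LAST_TYPE_ID are JFR trace-id constants from
// jfrTraceIdMacros.hpp):
//
//   result = (array_kls_trace_id_raw >> TRACE_ID_SHIFT) + 1;  // array klass
//   result = LAST_TYPE_ID + 1;                                // void class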
2954 /*
2955     jobject h_event_writer = Thread::jfr_thread_local()->java_event_writer();
2956     if (h_event_writer == NULL) {
2957       return NULL;
2958     }
2959     oop threadObj = Thread::threadObj();
2960     oop vthread = java_lang_Thread::vthread(threadObj);
2961     traceid tid;
2962     if (vthread != threadObj) {  // i.e. current thread is virtual
2963       traceid value = java_lang_VirtualThread::tid(vthread);
2964       tid = value & tid_mask;
2965       traceid epoch = value >> epoch_shift;
2966       traceid current_epoch = JfrTraceIdEpoch::current_generation();
2967       if (epoch != current_epoch) {
2968         traceid update_value = current_epoch << epoch_shift;
2969         update_value |= tid;
2970         java_lang_VirtualThread::set_tid(vthread, update_value);
2971         write_checkpoint(tid);
2972       }
2973     } else {
2974       tid = java_lang_Thread::tid(threadObj);
2975     }
2976     oop event_writer = JNIHandles::resolve_non_null(h_event_writer);
2977     traceid tid_in_event_writer = getField(event_writer, "threadID");
2978     if (tid_in_event_writer != tid) {
2979       setField(event_writer, "threadID", tid);
2980     }
2981     return event_writer;
2982 
2983  */
2984 bool LibraryCallKit::inline_native_getEventWriter() {
2985   enum { _true_path = 1, _false_path = 2, PATH_LIMIT };
2986 
2987   // save input memory and i_o state
2988   Node* input_memory_state = reset_memory();
2989   set_all_memory(input_memory_state);
2990   Node* input_io_state = i_o();
2991 
2992   // TLS
2993   Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
2994 
2995   // load offset of jfr_thread_local
2996   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr, in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
2997 
2998   // Load eventwriter jobject handle from the jfr_thread_local
2999   Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
3000 
3001   // Null check the jobject handle
3002   Node* jobj_cmp_null = _gvn.transform(new CmpPNode(jobj, null()));
3003   Node* test_jobj_ne_null = _gvn.transform(new BoolNode(jobj_cmp_null, BoolTest::ne));
3004   IfNode* iff_jobj_ne_null = create_and_map_if(control(), test_jobj_ne_null, PROB_MAX, COUNT_UNKNOWN);
3005 
3006   // false path, jobj is null
3007   Node* jobj_is_null = _gvn.transform(new IfFalseNode(iff_jobj_ne_null));
3008 
3009   // true path, jobj is not null
3010   Node* jobj_is_not_null = _gvn.transform(new IfTrueNode(iff_jobj_ne_null));
3011 
3012   // load the threadObj for the CarrierThread
3013   Node* const threadObj = generate_current_thread(tls_ptr);
3014 
3015   // load the vthread field
3016   Node* const vthreadObj = generate_virtual_thread(tls_ptr);
3017 
3018   // vthread != threadObj
3019   RegionNode* threadObj_result_rgn = new RegionNode(PATH_LIMIT);
3020   record_for_igvn(threadObj_result_rgn);
3021   PhiNode*    thread_id_mem = new PhiNode(threadObj_result_rgn, Type::MEMORY, TypePtr::BOTTOM);
3022   PhiNode*    thread_id_io = new PhiNode(threadObj_result_rgn, Type::ABIO);
3023   record_for_igvn(thread_id_io);
3024   PhiNode*    thread_id_val = new PhiNode(threadObj_result_rgn, TypeLong::LONG);
3025   record_for_igvn(thread_id_val);
3026 
3027   // If vthread != thread, this is a virtual thread
3028   Node* vthreadObj_cmp_threadObj = _gvn.transform(new CmpPNode(vthreadObj, threadObj));
3029   Node* test_vthreadObj_ne_threadObj = _gvn.transform(new BoolNode(vthreadObj_cmp_threadObj, BoolTest::ne));
3030   IfNode* iff_vthreadObj_ne_threadObj =
3031     create_and_map_if(jobj_is_not_null, test_vthreadObj_ne_threadObj, PROB_FAIR, COUNT_UNKNOWN);
3032 
3033   // false branch, fallback to threadObj
3034   Node* virtual_thread_is_threadObj = _gvn.transform(new IfFalseNode(iff_vthreadObj_ne_threadObj));
3035   Node* thread_obj_tid = load_field_from_object(threadObj, "tid", "J");
3036 
3037   // true branch, this is a virtual thread
3038   Node* virtual_thread_is_not_threadObj = _gvn.transform(new IfTrueNode(iff_vthreadObj_ne_threadObj));
3039   // read the thread id from the vthread
3040   Node* vthread_obj_tid_value = load_field_from_object(vthreadObj, "tid", "J");
3041 
3042   // bit shift and mask
3043   Node* const epoch_shift = _gvn.intcon(jfr_epoch_shift);
3044   Node* const tid_mask = _gvn.longcon(jfr_id_mask);
3045 
3046   // mask off the epoch information from the thread id
3047   Node* const vthread_obj_tid = _gvn.transform(new AndLNode(vthread_obj_tid_value, tid_mask));
3049   // shift the value right to extract the epoch recorded with the thread id
3049   Node* const vthread_epoch = _gvn.transform(new URShiftLNode(vthread_obj_tid_value, epoch_shift));
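  // Worked example of the packing assumed by the two transforms above
  // (widths illustrative; jfr_epoch_shift and jfr_id_mask are authoritative):
  //
  //   value = [ epoch | tid ]            e.g. epoch in the high bits
  //   tid   = value & tid_mask;          // AndLNode
  //   epoch = value >>> epoch_shift;     // URShiftLNode (unsigned shift)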
3050 
3051   // epoch compare
3052   RegionNode* epoch_compare_rgn = new RegionNode(PATH_LIMIT);
3053   record_for_igvn(epoch_compare_rgn);
3054   PhiNode*    epoch_compare_mem = new PhiNode(epoch_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3055   record_for_igvn(epoch_compare_mem);
3056   PhiNode*    epoch_compare_io = new PhiNode(epoch_compare_rgn, Type::ABIO);
3057   record_for_igvn(epoch_compare_io);
3058 
3059   Node* saved_ctl = control();
3060   set_control(virtual_thread_is_not_threadObj);
3061   TypePtr* const no_memory_effects = NULL;
3062   // make a runtime call to get the current epoch
3063   Node* call_epoch_generation = make_runtime_call(RC_LEAF | RC_NO_FP,
3064                                                   OptoRuntime::void_long_Type(),
3065                                                   (address)JFR_EPOCH_GENERATION_FUNCTION,
3066                                                   "epoch_generation", no_memory_effects);
3067   // restore
3068   set_control(saved_ctl);
3069 
3070   Node* current_epoch_gen_control = _gvn.transform(new ProjNode(call_epoch_generation, TypeFunc::Control));
3071   Node* current_epoch_gen_value = _gvn.transform(new ProjNode(call_epoch_generation, TypeFunc::Parms));
3072 
3073   // compare epoch in vthread to the current epoch generation
3074   Node* const epoch_cmp = _gvn.transform(new CmpLNode(current_epoch_gen_value, vthread_epoch));
3075   Node* test_epoch_ne = _gvn.transform(new BoolNode(epoch_cmp, BoolTest::ne));
3076   IfNode* iff_epoch_ne = create_and_map_if(current_epoch_gen_control, test_epoch_ne, PROB_FAIR, COUNT_UNKNOWN);
3077 
3078   // true path, epochs are not equal, there is a need to write a checkpoint for the vthread
3079   Node* epoch_is_not_equal = _gvn.transform(new IfTrueNode(iff_epoch_ne));
3080   // get the field offset for storing an updated tid and epoch value
3081   Node* const tid_field_address = field_address_from_object(vthreadObj, "tid", "J", false);
3082   const TypePtr* tid_field_address_type = _gvn.type(tid_field_address)->isa_ptr();
3083 
3084   // shift up current epoch generation value
3085   Node* left_shifted_current_epoch_gen = _gvn.transform(new LShiftLNode(current_epoch_gen_value, epoch_shift));
3086   // OR the shifted epoch generation value with the threadid
3087   Node* current_epoch_gen_and_tid = _gvn.transform(new OrLNode(vthread_obj_tid, left_shifted_current_epoch_gen));
3088   // store back the current_epoch_gen_and_tid into the vthreadObject
3089   Node* vthreadObj_epoch_gen_memory_store = store_to_memory(epoch_is_not_equal,
3090                                                             tid_field_address,
3091                                                             current_epoch_gen_and_tid,
3092                                                             T_LONG,
3093                                                             tid_field_address_type,
3094                                                             MemNode::unordered);
3095 
3096   // call out to the VM in order to write a checkpoint for the vthread
3097   saved_ctl = control();
3098   set_control(epoch_is_not_equal);
3099   // call can safepoint
3100   Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
3101                                                   OptoRuntime::jfr_write_checkpoint_Type(),
3102                                                   StubRoutines::jfr_write_checkpoint(),
3103                                                   "write_checkpoint", TypePtr::BOTTOM, vthread_obj_tid, top());
3104   // restore
3105   set_control(saved_ctl);
3106   Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));
3107 
3108   // false path, epochs are the same, no need to write new checkpoint information
3109   Node* epoch_is_equal = _gvn.transform(new IfFalseNode(iff_epoch_ne));
3110 
3111   // need memory and IO
3112   epoch_compare_rgn->init_req(_true_path, call_write_checkpoint_control);
3113   epoch_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
3114   epoch_compare_io->init_req(_true_path, i_o());
3115   epoch_compare_rgn->init_req(_false_path, epoch_is_equal);
3116   epoch_compare_mem->init_req(_false_path, input_memory_state);
3117   epoch_compare_io->init_req(_false_path, input_io_state);
3118 
3119   // merge the threadObj branch
3120   threadObj_result_rgn->init_req(_true_path, _gvn.transform(epoch_compare_rgn));
3121   threadObj_result_rgn->init_req(_false_path, virtual_thread_is_threadObj);
3122   thread_id_mem->init_req(_true_path, _gvn.transform(epoch_compare_mem));
3123   thread_id_mem->init_req(_false_path, input_memory_state);
3124   thread_id_io->init_req(_true_path, _gvn.transform(epoch_compare_io));
3125   thread_id_io->init_req(_false_path, input_io_state);
3126   thread_id_val->init_req(_true_path, _gvn.transform(vthread_obj_tid));
3127   thread_id_val->init_req(_false_path, _gvn.transform(thread_obj_tid));
3128 
3129   // update memory and io state
3130   set_all_memory(_gvn.transform(thread_id_mem));
3131   set_i_o(_gvn.transform(thread_id_io));
3132 
3133   // load the event writer oop by dereferencing the jobject handle
3134   saved_ctl = control();
3135   set_control(_gvn.transform(threadObj_result_rgn));
3136   ciKlass* klass_EventWriter = env()->find_system_klass(ciSymbol::make("jdk/jfr/internal/EventWriter"));
3137   assert(klass_EventWriter->is_loaded(), "invariant");
3138   ciInstanceKlass* const instklass_EventWriter = klass_EventWriter->as_instance_klass();
3139   const TypeKlassPtr* const aklass = TypeKlassPtr::make(instklass_EventWriter);
3140   const TypeOopPtr* const xtype = aklass->as_instance_type();
3141   Node* event_writer = access_load(jobj, xtype, T_OBJECT, IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3142   // restore
3143   set_control(saved_ctl);
3144 
3145   // load the current thread id from the event writer object
3146   Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
3147   // get the field offset to store an updated tid value later (conditionally)
3148   Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
3149   const TypePtr* event_writer_tid_field_type = _gvn.type(event_writer_tid_field)->isa_ptr();
3150 
3151   // thread id compare
3152   RegionNode* tid_compare_rgn = new RegionNode(PATH_LIMIT);
3153   record_for_igvn(tid_compare_rgn);
3154   PhiNode*    tid_compare_mem = new PhiNode(tid_compare_rgn, Type::MEMORY, TypePtr::BOTTOM);
3155   record_for_igvn(tid_compare_mem);
3156   PhiNode*    tid_compare_io = new PhiNode(tid_compare_rgn, Type::ABIO);
3157   record_for_igvn(tid_compare_io);
3158 
3159   // compare current tid to what is stored in the event writer object
3160   Node* const tid_cmp = _gvn.transform(new CmpLNode(event_writer_tid, _gvn.transform(thread_id_val)));
3161   Node* test_tid_ne = _gvn.transform(new BoolNode(tid_cmp, BoolTest::ne));
3162   IfNode* iff_tid_ne = create_and_map_if(_gvn.transform(threadObj_result_rgn), test_tid_ne, PROB_FAIR, COUNT_UNKNOWN);
3163 
3164   // true path, tid not equal, need to store tid value to the event writer
3165   Node* tid_is_not_equal = _gvn.transform(new IfTrueNode(iff_tid_ne));
3166   record_for_igvn(tid_is_not_equal);
3167   // update the event writer with the current thread id value
3168   Node* event_writer_tid_memory_store = store_to_memory(tid_is_not_equal,
3169                                                         event_writer_tid_field,
3170                                                         thread_id_val,
3171                                                         T_LONG,
3172                                                         event_writer_tid_field_type,
3173                                                         MemNode::unordered);
3174 
3175   // false path, tids are the same, no update
3176   Node* tid_is_equal = _gvn.transform(new IfFalseNode(iff_tid_ne));
3177 
3178   // update controls
3179   tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
3180   tid_compare_rgn->init_req(_false_path, tid_is_equal);
3181 
3182   // update memory phi node
3183   tid_compare_mem->init_req(_true_path, _gvn.transform(reset_memory()));
3184   tid_compare_mem->init_req(_false_path, _gvn.transform(thread_id_mem));
3185 
3186   // update io phi node
3187   tid_compare_io->init_req(_true_path, _gvn.transform(i_o()));
3188   tid_compare_io->init_req(_false_path, _gvn.transform(thread_id_io));
3189 
3190   // result of top level CFG, Memory, IO and Value
3191   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3192   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3193   PhiNode*    result_io = new PhiNode(result_reg, Type::ABIO);
3194   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::BOTTOM);
3195 
3196   // result control
3197   result_reg->init_req(_true_path, _gvn.transform(tid_compare_rgn));
3198   result_reg->init_req(_false_path, jobj_is_null);
3199 
3200   // result memory
3201   result_mem->init_req(_true_path, _gvn.transform(tid_compare_mem));
3202   result_mem->init_req(_false_path, _gvn.transform(input_memory_state));
3203 
3204   // result io
3205   result_io->init_req(_true_path, _gvn.transform(tid_compare_io));
3206   result_io->init_req(_false_path, _gvn.transform(input_io_state));
3207 
3208   // result values
3209   result_val->init_req(_true_path, _gvn.transform(event_writer)); // return event writer oop
3210   result_val->init_req(_false_path, null()); // return NULL
3211 
3212   // set output state
3213   set_all_memory(_gvn.transform(result_mem));
3214   set_i_o(_gvn.transform(result_io));
3215   set_result(result_reg, result_val);
3216   return true;
3217 }
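// Each join above uses the same hand-built merge shape; as a sketch:
//
//   RegionNode* r   = new RegionNode(PATH_LIMIT);        // control merge
//   PhiNode*    mem = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
//   PhiNode*    io  = new PhiNode(r, Type::ABIO);
//   r->init_req(_true_path, ctl_t);  mem->init_req(_true_path, mem_t);
//   r->init_req(_false_path, ctl_f); mem->init_req(_false_path, mem_f);
//
// with set_all_memory/set_i_o re-established from the transformed phis.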
3218 
3219 #endif // JFR_HAVE_INTRINSICS
3220 
3221 //------------------------inline_native_currentThread0------------------
3222 bool LibraryCallKit::inline_native_currentThread0() {
3223   Node* junk = NULL;
3224   set_result(generate_current_thread(junk));
3225   return true;
3226 }
3227 
3228 Node* LibraryCallKit::scopeLocalCache_helper() {
3229   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3230   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3231 
3232   bool xk = etype->klass_is_exact();
3233 
3234   Node* thread = _gvn.transform(new ThreadLocalNode());
3235   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopeLocalCache_offset()));
3236   return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
3237         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3238 }
3239 
3240 //------------------------inline_native_scopeLocalCache------------------
3241 bool LibraryCallKit::inline_native_scopeLocalCache() {
3242   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3243   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3244   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3245 
3246   // Because we create the scopeLocal cache lazily we have to make the
3247   // type of the result BotPTR.
3248   bool xk = etype->klass_is_exact();
3249   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3250   Node* cache_obj_handle = scopeLocalCache_helper();
3251   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3252 
3253   return true;
3254 }
3255 
3256 //------------------------inline_native_setScopeLocalCache------------------
3257 bool LibraryCallKit::inline_native_setScopeLocalCache() {
3258   Node* arr = argument(0);
3259   Node* cache_obj_handle = scopeLocalCache_helper();
3260 
3261   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3262   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
3263                   MemNode::unordered);
3264 
3265   return true;
3266 }
3267 
3268 //------------------------inline_native_currentThread------------------
3269 bool LibraryCallKit::inline_native_currentThread() {
3270   Node* junk = NULL;
3271   set_result(generate_virtual_thread(junk));
3272   return true;
3273 }
3274 
3275 //------------------------inline_native_setVthread------------------
3276 bool LibraryCallKit::inline_native_setCurrentThread() {
3277   assert(C->method()->changes_current_thread(),
3278          "method changes current Thread but is not annotated ChangesCurrentThread");
3279   Node* arr = argument(1);
3280   Node* thread = _gvn.transform(new ThreadLocalNode());
3281   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3282   Node* thread_obj_handle
3283     = make_load(NULL, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3284   thread_obj_handle = _gvn.transform(thread_obj_handle);
3285   const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3286   // Stores of oops to native memory not supported yet by BarrierSetC2::store_at_resolved
3287   // access_store_at(NULL, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3288   store_to_memory(control(), thread_obj_handle, arr, T_OBJECT, adr_type, MemNode::unordered);
3289 
3290   return true;
3291 }
3292 
3293 //---------------------------load_mirror_from_klass----------------------------
3294 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3295 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3296   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3297   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3298   // mirror = ((OopHandle)mirror)->resolve();
3299   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3300 }
3301 
3302 //-----------------------load_klass_from_mirror_common-------------------------
3303 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3304 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3305 // and branch to the given path on the region.
3306 // If never_see_null, take an uncommon trap on null, so we can optimistically
3307 // compile for the non-null case.
3308 // If the region is NULL, force never_see_null = true.
3309 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,

6133 
6134   RegionNode* region = new RegionNode(3);
6135   PhiNode* phi = new PhiNode(region, TypeInt::BOOL);
6136 
6137   Node* if_true = _gvn.transform(new IfTrueNode(if_node));
6138   region->init_req(1, if_true);
6139   phi->init_req(1, intcon(1));
6140 
6141   Node* if_false = _gvn.transform(new IfFalseNode(if_node));
6142   region->init_req(2, if_false);
6143   phi->init_req(2, intcon(0));
6144 
6145   set_control(_gvn.transform(region));
6146   record_for_igvn(region);
6147   set_result(_gvn.transform(phi));
6148   return true;
6149 }
6150 
6151 
6152 Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
6153                                              DecoratorSet decorators, bool is_static,
6154                                              ciInstanceKlass* fromKls) {
6155   if (fromKls == NULL) {
6156     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6157     assert(tinst != NULL, "obj is null");
6158     assert(tinst->klass()->is_loaded(), "obj is not loaded");
6159     fromKls = tinst->klass()->as_instance_klass();
6160   } else {
6161     assert(is_static, "only for static field access");
6162   }
6163   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6164                                               ciSymbol::make(fieldTypeString),
6165                                               is_static);
6166 
6167   assert (field != NULL, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName);
6168   if (field == NULL) return (Node *) NULL;
6169 
6170   if (is_static) {
6171     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
6172     fromObj = makecon(tip);
6173   }
6174 
6175   // Next code copied from Parse::do_get_xxx():
6176 
6177   // Compute address and memory type.
6178   int offset  = field->offset_in_bytes();
6179   bool is_vol = field->is_volatile();
6180   ciType* field_klass = field->type();
6181   assert(field_klass->is_loaded(), "should be loaded");
6182   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6183   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6184   BasicType bt = field->layout_type();
6185 
6186   // Build the resultant type of the load
6187   const Type *type;
6188   if (bt == T_OBJECT) {
6189     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6190   } else {
6191     type = Type::get_const_basic_type(bt);
6192   }
6193 
6194   if (is_vol) {
6195     decorators |= MO_SEQ_CST;
6196   }
6197 
6198   return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
6199 }
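// Example uses from this file: the event-writer intrinsic reads tid fields
// via this helper and takes raw field addresses for the later stores:
//
//   Node* tid  = load_field_from_object(vthreadObj, "tid", "J");
//   Node* addr = field_address_from_object(vthreadObj, "tid", "J", false);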
6200 
6201 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6202                                                  bool is_exact, bool is_static,
6203                                                  ciInstanceKlass * fromKls) {
6204   if (fromKls == NULL) {
6205     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6206     assert(tinst != NULL, "obj is null");
6207     assert(tinst->klass()->is_loaded(), "obj is not loaded");
6208     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6209     fromKls = tinst->klass()->as_instance_klass();
6210   }
6211   else {
6212     assert(is_static, "only for static field access");
6213   }
6214   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6215     ciSymbol::make(fieldTypeString),
6216     is_static);
6217 
6218   assert(field != NULL, "undefined field");
6219   assert(!field->is_volatile(), "not defined for volatile fields");
6220 
6221   if (is_static) {
6222     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
6223     fromObj = makecon(tip);

7271   ciKlass* klass = NULL;
7272   if (klass_name != NULL) {
7273     klass = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_name));
7274   }
7275   if ((klass == NULL) || !klass->is_loaded()) {
7276     // if none of MD5/SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
7277     Node* ctrl = control();
7278     set_control(top()); // no intrinsic path
7279     return ctrl;
7280   }
7281   ciInstanceKlass* instklass = klass->as_instance_klass();
7282 
7283   Node* instof = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass)));
7284   Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
7285   Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
7286   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
7287 
7288   return instof_false;  // even if it is NULL
7289 }
7290 
7291 bool LibraryCallKit::inline_continuation_do_yield() {
7292   address call_addr = StubRoutines::cont_doYield();
7293   const TypeFunc* tf = OptoRuntime::continuation_doYield_Type();
7294   Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "doYield", TypeRawPtr::BOTTOM);
7295   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
7296   set_result(result);
7297   return true;
7298 }
7299 
7300 //-------------inline_fma-----------------------------------
7301 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
7302   Node *a = NULL;
7303   Node *b = NULL;
7304   Node *c = NULL;
7305   Node* result = NULL;
7306   switch (id) {
7307   case vmIntrinsics::_fmaD:
7308     assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
7309     // no receiver since it is a static method
7310     a = round_double_node(argument(0));
7311     b = round_double_node(argument(2));
7312     c = round_double_node(argument(4));
7313     result = _gvn.transform(new FmaDNode(control(), a, b, c));
7314     break;
7315   case vmIntrinsics::_fmaF:
7316     assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
7317     a = argument(0);
7318     b = argument(1);
7319     c = argument(2);