  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  Node* get_table_from_crc32c_class(ciInstanceKlass *crc32c_class);
  bool inline_updateBytesCRC32C();
  bool inline_updateDirectByteBufferCRC32C();
  bool inline_updateBytesAdler32();
  bool inline_updateByteBufferAdler32();
  bool inline_multiplyToLen();
  bool inline_hasNegatives();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();
  bool inline_bigIntegerShift(bool isRightShift);
  bool inline_vectorizedMismatch();
  bool inline_fma(vmIntrinsics::ID id);
  bool inline_character_compare(vmIntrinsics::ID id);
  bool inline_fp_min_max(vmIntrinsics::ID id);

  bool inline_addressOf();
  bool inline_sizeOf();
  bool inline_getReferencedObjects();

  bool inline_profileBoolean();
  bool inline_isCompileConstant();
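  // On x86 with AVX2 or higher, flag the compile so the backend emits
  // vzeroupper and avoids the AVX-to-SSE transition penalty when intrinsic
  // code mixes 256-bit AVX with legacy SSE.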
  void clear_upper_avx() {
#ifdef X86
    if (UseAVX >= 2) {
      C->set_clear_upper_avx(true);
    }
#endif
  }
};

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return NULL;
  }

// ... [elided; the cases below are from the intrinsic dispatch switch in
// LibraryCallKit::try_to_inline] ...

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

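  // float/double min/max are intrinsified because their NaN propagation and
  // signed-zero ordering (e.g. Math.min(-0.0f, +0.0f) is -0.0f) require
  // dedicated IR nodes rather than an ordinary compare-and-branch.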
  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minD:
    return inline_fp_min_max(intrinsic_id());

  case vmIntrinsics::_sizeOf:
    return inline_sizeOf();

  case vmIntrinsics::_addressOf:
    return inline_addressOf();

  case vmIntrinsics::_getReferencedObjects:
    return inline_getReferencedObjects();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }

// ... [elided; the lines below are the tail of
// LibraryCallKit::inline_profileBoolean] ...

    // By replacing the method body with profile data (represented as a
    // ProfileBooleanNode at the IR level) we effectively disable profiling.
    // This enables full-speed execution once optimized code is generated.
    Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
    C->record_for_igvn(profile);
    set_result(profile);
    return true;
  } else {
    // Continue profiling.
    // Profile data isn't available at the moment, so execute the bytecode
    // version of the method. Usually, when GWT LambdaForms are profiled it
    // means that a stand-alone nmethod is being compiled and the counters
    // aren't available, since the corresponding MethodHandle isn't a
    // compile-time constant.
    return false;
  }
}

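// Folds to 1 only if the argument is already a constant in the IR at this
// point in the compilation (e.g. after inlining through a constant
// MethodHandle); otherwise it folds to 0.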
bool LibraryCallKit::inline_isCompileConstant() {
  Node* n = argument(0);
  set_result(n->is_Con() ? intcon(1) : intcon(0));
  return true;
}

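// sizeOf mirrors the allocation-size computation: for instances the layout
// helper encodes the size directly; for arrays the size is
// round_to(header_size + (length << log2_element_size), MinObjAlignmentInBytes).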
bool LibraryCallKit::inline_sizeOf() {
  if (!RuntimeSizeOf) {
    set_result(longcon(-1));
    return true;
  }

  Node* obj = argument(0);
  Node* klass_node = load_object_klass(obj);

  jint layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int layout_is_con = (layout_val == NULL);

  if (layout_is_con) {
    // The layout helper is constant, so the size can be computed at compile time.
    assert(false, "since sizeOf is @DontInline, this path should never be taken");

    if (Klass::layout_helper_is_instance(layout_con)) {
      // Instance case: layout_con contains the size itself.
      Node *size = longcon(Klass::layout_helper_size_in_bytes(layout_con));
      set_result(size);
    } else {
      // Array case: size is round(header + element_size*arraylength).
      // Since arraylength is different for every array instance, we have to
      // compute the whole thing at runtime.

      Node* arr_length = load_array_length(obj);

      int round_mask = MinObjAlignmentInBytes - 1;
      int hsize = Klass::layout_helper_header_size(layout_con);
      int eshift = Klass::layout_helper_log2_element_size(layout_con);

      if ((round_mask & ~right_n_bits(eshift)) == 0) {
        round_mask = 0;  // strength-reduce it if it goes away completely
      }
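      // Worked example (assuming 8-byte alignment, so round_mask == 7): for an
      // int[10] with hsize == 16 and eshift == 2, the expression below yields
      // (16 + 7 + (10 << 2)) & ~7 == 63 & ~7 == 56, i.e. round_to(16 + 40, 8).
      // For a long[] (eshift == 3) every element stride is already aligned, so
      // round_mask strength-reduces to 0 above and the AndX disappears.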
      assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
      Node* header_size = intcon(hsize + round_mask);

      Node* lengthx = ConvI2X(arr_length);
      Node* headerx = ConvI2X(header_size);

      Node* abody = lengthx;
      if (eshift != 0) {
        abody = _gvn.transform(new LShiftXNode(lengthx, intcon(eshift)));
      }
      Node* size = _gvn.transform(new AddXNode(headerx, abody));
      if (round_mask != 0) {
        size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
      }
      size = ConvX2L(size);
      set_result(size);
    }
  } else {
    // The layout helper is not constant, so we need to test for array-ness
    // at runtime.

    enum { _instance_path = 1, _array_path, PATH_LIMIT };
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    PhiNode* result_val = new PhiNode(result_reg, TypeLong::LONG);
    record_for_igvn(result_reg);

    Node* array_ctl = generate_array_guard(klass_node, NULL);
    if (array_ctl != NULL) {
      // Array case: size is round(header + element_size*arraylength).
      // Since arraylength is different for every array instance, we have to
      // compute the whole thing at runtime.

      PreserveJVMState pjvms(this);
      set_control(array_ctl);
      Node* arr_length = load_array_length(obj);

      int round_mask = MinObjAlignmentInBytes - 1;
      Node* mask = intcon(round_mask);

      Node* hss = intcon(Klass::_lh_header_size_shift);
      Node* hsm = intcon(Klass::_lh_header_size_mask);
      Node* header_size = _gvn.transform(new URShiftINode(layout_val, hss));
      header_size = _gvn.transform(new AndINode(header_size, hsm));
      header_size = _gvn.transform(new AddINode(header_size, mask));
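      // As in the constant case, the rounding slack (round_mask) is added to
      // the header size up front so that the final AndX can round the total
      // down in a single step.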

      // There is no need to mask or shift this value.
      // The semantics of LShiftINode include an implicit mask to 0x1F.
      assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
      Node* elem_shift = layout_val;

      Node* lengthx = ConvI2X(arr_length);
      Node* headerx = ConvI2X(header_size);

      Node* abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
      Node* size = _gvn.transform(new AddXNode(headerx, abody));
      if (round_mask != 0) {
        size = _gvn.transform(new AndXNode(size, MakeConX(~round_mask)));
      }
      size = ConvX2L(size);

      result_reg->init_req(_array_path, control());
      result_val->init_req(_array_path, size);
    }

    if (!stopped()) {
      // Instance case: the layout helper gives us the instance size almost
      // directly, but we need to mask out the _lh_instance_slow_path_bit.
      Node* size = ConvI2X(layout_val);
      assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
      Node* mask = MakeConX(~(intptr_t) right_n_bits(LogBytesPerLong));
      size = _gvn.transform(new AndXNode(size, mask));
      size = ConvX2L(size);

      result_reg->init_req(_instance_path, control());
      result_val->init_req(_instance_path, size);
    }

    set_result(result_reg, result_val);
  }

  return true;
}

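// Exposes the object's current address as a long. The value is only a
// snapshot: a moving collector may relocate the object at any time, so the
// result is presumably meant for diagnostics rather than dereferencing.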
bool LibraryCallKit::inline_addressOf() {
  if (!RuntimeAddressOf) {
    set_result(longcon(-1));
    return true;
  }

  Node* obj = argument(0);
  Node* raw_val = _gvn.transform(new CastP2XNode(NULL, obj));
  Node* off_val = _gvn.transform(new AddXNode(raw_val, MakeConX(Universe::non_heap_offset())));
  Node* long_val = ConvX2L(off_val);

  set_result(long_val);
  return true;
}

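// Delegates to a leaf runtime call: RC_LEAF | RC_NO_FP selects a call node
// that does not safepoint and is known not to touch floating-point state,
// and the single result is read back from the call's Parms projection.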
bool LibraryCallKit::inline_getReferencedObjects() {
  Node* a1 = argument(0);
  Node* a2 = argument(1);

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::get_referenced_objects_Type(),
                                 CAST_FROM_FN_PTR(address, SharedRuntime::get_referenced_objects),
                                 "get_referenced_objects",
                                 TypePtr::BOTTOM,
                                 a1, a2);

  Node* value = _gvn.transform(new ProjNode(call, TypeFunc::Parms+0));
  set_result(value);
  return true;
}