6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/bcEscapeAnalyzer.hpp"
26 #include "code/vmreg.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/c2/barrierSetC2.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "opto/callGenerator.hpp"
33 #include "opto/callnode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/escape.hpp"
37 #include "opto/locknode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/parse.hpp"
41 #include "opto/regalloc.hpp"
42 #include "opto/regmask.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubRoutines.hpp"
47 #include "utilities/powerOfTwo.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
// Node size, identity, and typing for the method-entry StartNode.
uint StartNode::size_of() const { return sizeof(*this); }
// Two StartNodes are equal iff they carry the same incoming-signature tuple.
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
// The type of a StartNode is the tuple of incoming parameter types.
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
// Only transform applied here: collapse if the control region has died.
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
// Ask the shared runtime where each incoming Java argument lives
// (register or stack slot), filling in parm_regs.
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
// StartNode inputs carry no values, so there are no register constraints.
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::EMPTY;
}
78
79 //------------------------------match------------------------------------------
80 // Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Control/IO/memory projections carry no machine value: empty mask.
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      // An actual Java parameter: find its type and the register(s) the
      // calling convention assigns to it.
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}
104
105 //------------------------------StartOSRNode----------------------------------
106 // The method start node for an on stack replacement adapter
107
108 //------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  // The OSR entry takes a single raw-pointer argument: the interpreter's
  // OSR buffer holding the locals/monitors to migrate.
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
// Printable names for the fixed initial edges of a call, indexed by _con.
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};
120
121 #ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    // One of the fixed projections: print its symbolic name.
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}
131
132 void ParmNode::dump_compact_spec(outputStream *st) const {
133 if (_con < TypeFunc::Parms) {
134 st->print("%s", names[_con]);
135 } else {
483 if (cik->is_instance_klass()) {
484 cik->print_name_on(st);
485 iklass = cik->as_instance_klass();
486 } else if (cik->is_type_array_klass()) {
487 cik->as_array_klass()->base_element_type()->print_name_on(st);
488 st->print("[%d]", spobj->n_fields());
489 } else if (cik->is_obj_array_klass()) {
490 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
491 if (cie->is_instance_klass()) {
492 cie->print_name_on(st);
493 } else if (cie->is_type_array_klass()) {
494 cie->as_array_klass()->base_element_type()->print_name_on(st);
495 } else {
496 ShouldNotReachHere();
497 }
498 st->print("[%d]", spobj->n_fields());
499 int ndim = cik->as_array_klass()->dimension() - 1;
500 while (ndim-- > 0) {
501 st->print("[]");
502 }
503 }
504 st->print("={");
505 uint nf = spobj->n_fields();
506 if (nf > 0) {
507 uint first_ind = spobj->first_index(mcall->jvms());
508 Node* fld_node = mcall->in(first_ind);
509 ciField* cifield;
510 if (iklass != nullptr) {
511 st->print(" [");
512 cifield = iklass->nonstatic_field_at(0);
513 cifield->print_name_on(st);
514 format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
515 } else {
516 format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
517 }
518 for (uint j = 1; j < nf; j++) {
519 fld_node = mcall->in(first_ind+j);
520 if (iklass != nullptr) {
521 st->print(", [");
522 cifield = iklass->nonstatic_field_at(j);
523 cifield->print_name_on(st);
524 format_helper(regalloc, st, fld_node, ":", j, &scobjs);
525 } else {
526 format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
527 }
528 }
529 }
530 st->print(" }");
531 }
532 }
533 st->cr();
534 if (caller() != nullptr) caller()->format(regalloc, n, st);
535 }
536
537
538 void JVMState::dump_spec(outputStream *st) const {
539 if (_method != nullptr) {
540 bool printed = false;
541 if (!Verbose) {
542 // The JVMS dumps make really, really long lines.
543 // Take out the most boring parts, which are the package prefixes.
738 tf()->dump_on(st);
739 }
740 if (_cnt != COUNT_UNKNOWN) {
741 st->print(" C=%f", _cnt);
742 }
743 const Node* const klass_node = in(KlassNode);
744 if (klass_node != nullptr) {
745 const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr();
746
747 if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) {
748 st->print(" allocationKlass:");
749 klass_ptr->exact_klass()->print_name_on(st);
750 }
751 }
752 if (jvms() != nullptr) {
753 jvms()->dump_spec(st);
754 }
755 }
756 #endif
757
// A call's type is its full return tuple.
const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  // A call whose control is missing or dead is itself dead.
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
771
772
773 //------------------------------match------------------------------------------
774 // Construct projections for control, I/O, memory-fields, ..., and
775 // return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Non-value projections get an empty mask.
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::EMPTY, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    // Pick the return-value convention appropriate to the kind of callee.
    OptoRegPair regs = Opcode() == Op_CallLeafVector
      ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
      : is_CallRuntime()
        ? match->c_return_value(ideal_reg)     // Calls into C runtime
        : match->  return_value(ideal_reg);    // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());

    if (Opcode() == Op_CallLeafVector) {
      // If the return is in vector, compute appropriate regmask taking into account the whole range
      if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
        if(OptoReg::is_valid(regs.second())) {
          for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
            rm.insert(r);
          }
        }
      }
    }

    // Two-register (e.g. long/double) returns also claim the second register.
    if( OptoReg::is_valid(regs.second()) )
      rm.insert(regs.second());
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}
825
826 //
827 // Determine whether the call could modify the field of the specified
828 // instance at the specified offset.
829 //
830 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
831 assert((t_oop != nullptr), "sanity");
832 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
833 const TypeTuple* args = _tf->domain();
834 Node* dest = nullptr;
835 // Stubs that can be called once an ArrayCopyNode is expanded have
836 // different signatures. Look for the second pointer argument,
837 // that is the destination of the copy.
838 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
839 if (args->field_at(i)->isa_ptr()) {
840 j++;
841 if (j == 2) {
842 dest = in(i);
843 break;
844 }
845 }
846 }
847 guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
848 if (phase->type(dest)->isa_rawptr()) {
849 // may happen for an arraycopy that initializes a newly allocated object. Conservatively return true;
850 return true;
851 }
852 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
853 return true;
866 Node* proj = proj_out_or_null(TypeFunc::Parms);
867 if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
868 return false;
869 }
870 }
871 if (is_CallJava() && as_CallJava()->method() != nullptr) {
872 ciMethod* meth = as_CallJava()->method();
873 if (meth->is_getter()) {
874 return false;
875 }
876 // May modify (by reflection) if an boxing object is passed
877 // as argument or returned.
878 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
879 if (proj != nullptr) {
880 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
881 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
882 (inst_t->instance_klass() == boxing_klass))) {
883 return true;
884 }
885 }
886 const TypeTuple* d = tf()->domain();
887 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
888 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
889 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
890 (inst_t->instance_klass() == boxing_klass))) {
891 return true;
892 }
893 }
894 return false;
895 }
896 }
897 return true;
898 }
899
900 // Does this call have a direct reference to n other than debug information?
901 bool CallNode::has_non_debug_use(Node *n) {
902 const TypeTuple * d = tf()->domain();
903 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
904 Node *arg = in(i);
905 if (arg == n) {
906 return true;
907 }
908 }
909 return false;
910 }
911
912 // Returns the unique CheckCastPP of a call
913 // or 'this' if there are several CheckCastPP or unexpected uses
914 // or returns null if there is no one.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  // No result projection means no result, hence nothing to cast.
  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  // Scan the users of the result projection for exactly one CheckCastPP.
  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}
941
942
943 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) const {
944 projs->fallthrough_proj = nullptr;
945 projs->fallthrough_catchproj = nullptr;
946 projs->fallthrough_ioproj = nullptr;
947 projs->catchall_ioproj = nullptr;
948 projs->catchall_catchproj = nullptr;
949 projs->fallthrough_memproj = nullptr;
950 projs->catchall_memproj = nullptr;
951 projs->resproj = nullptr;
952 projs->exobj = nullptr;
953
954 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
955 ProjNode *pn = fast_out(i)->as_Proj();
956 if (pn->outcnt() == 0) continue;
957 switch (pn->_con) {
958 case TypeFunc::Control:
959 {
960 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
961 projs->fallthrough_proj = pn;
962 const Node* cn = pn->unique_ctrl_out_or_null();
963 if (cn != nullptr && cn->is_Catch()) {
964 ProjNode *cpn = nullptr;
965 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
966 cpn = cn->fast_out(k)->as_Proj();
967 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
968 if (cpn->_con == CatchProjNode::fall_through_index)
969 projs->fallthrough_catchproj = cpn;
970 else {
971 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
972 projs->catchall_catchproj = cpn;
978 case TypeFunc::I_O:
979 if (pn->_is_io_use)
980 projs->catchall_ioproj = pn;
981 else
982 projs->fallthrough_ioproj = pn;
983 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
984 Node* e = pn->out(j);
985 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
986 assert(projs->exobj == nullptr, "only one");
987 projs->exobj = e;
988 }
989 }
990 break;
991 case TypeFunc::Memory:
992 if (pn->_is_io_use)
993 projs->catchall_memproj = pn;
994 else
995 projs->fallthrough_memproj = pn;
996 break;
997 case TypeFunc::Parms:
998 projs->resproj = pn;
999 break;
1000 default:
1001 assert(false, "unexpected projection from allocation node.");
1002 }
1003 }
1004
1005 // The resproj may not exist because the result could be ignored
1006 // and the exception object may not exist if an exception handler
1007 // swallows the exception but all the other must exist and be found.
1008 assert(projs->fallthrough_proj != nullptr, "must be found");
1009 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
1010 assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
1011 assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
1012 assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
1013 assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
1014 if (separate_io_proj) {
1015 assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
1016 assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
1017 }
1018 }
1019
Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    // A late-inline generator must agree with the kind of call it is attached to.
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}
1031
1032 bool CallNode::is_call_to_arraycopystub() const {
1033 if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
1034 return true;
1035 }
1036 return false;
1037 }
1038
1039 bool CallNode::is_call_to_multianewarray_stub() const {
1040 if (_name != nullptr &&
1041 strstr(_name, "multianewarray") != nullptr &&
1042 strstr(_name, "C2 runtime") != nullptr) {
1043 return true;
1044 }
1045 return false;
1046 }
1047
1048 //=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
// Java calls are equal only if they target the same method and agree on
// whether the bytecode's symbolic call info is overridden.
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}
1055
// Copy the debug-info edges (everything past the fixed arguments) from sfpt
// onto this call and clone/rebase its JVMState chain accordingly.
void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  // Debug-info offsets shift by the difference in fixed-argument counts.
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
1094
1095 #ifdef ASSERT
// Check that the symbolic call info recorded in the bytecode is consistent
// with the actual callee; always returns true (violations fire asserts).
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  // The method that the bytecode at this bci symbolically invokes.
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
1108 #endif
1109
1110 #ifndef PRODUCT
// Print the target method name (when known) followed by common call info.
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
1115
1116 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1117 if (_method) {
1118 _method->print_short_name(st);
1119 } else {
1122 }
1123 #endif
1124
// Hand the attached generator to the compiler's late-inline list so this
// call gets another inlining attempt; each call may only register once.
void CallJavaNode::register_for_late_inline() {
  if (generator() != nullptr) {
    Compile::current()->prepend_late_inline(generator());
    set_generator(nullptr); // consume the generator to forbid re-registration
  } else {
    assert(false, "repeated inline attempt");
  }
}
1133
1134 //=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
// Static calls add no comparison state beyond CallJavaNode.
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}
1140
1141 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1142 CallGenerator* cg = generator();
1143 if (can_reshape && cg != nullptr) {
1144 if (cg->is_mh_late_inline()) {
1145 assert(IncrementalInlineMH, "required");
1146 assert(cg->call_node() == this, "mismatch");
1147 assert(cg->method()->is_method_handle_intrinsic(), "required");
1148
1149 // Check whether this MH handle call becomes a candidate for inlining.
1150 ciMethod* callee = cg->method();
1151 vmIntrinsics::ID iid = callee->intrinsic_id();
1152 if (iid == vmIntrinsics::_invokeBasic) {
1153 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1154 register_for_late_inline();
1155 }
1156 } else if (iid == vmIntrinsics::_linkToNative) {
1157 // never retry
1158 } else {
1159 assert(callee->has_member_arg(), "wrong type of call?");
1160 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1161 register_for_late_inline();
1182
1183 //----------------------------uncommon_trap_request----------------------------
1184 // If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
// Read the trap-request constant passed as the first argument of an
// uncommon-trap call; 0 if the argument is not a well-formed int constant.
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  // Be tolerant of malformed graphs while dumping.
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
1201
1202 #ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    // For uncommon traps, also decode and print the trap-request bits.
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
1218
1219 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1220 if (_method) {
1221 _method->print_short_name(st);
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
// Runtime calls are equal only if they target the same named entry point.
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
// Vector leaf calls must additionally agree on the vector width in bits.
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
// Runtime calls use the native C calling convention.
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}
1319
// Place arguments according to the platform's vector calling convention.
// All arguments and the return value must be exactly _num_bits wide.
void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}
1334
1335 //=============================================================================
1336 //------------------------------calling_convention-----------------------------
1337
1338
1339 //=============================================================================
// A pure leaf call is unused when no one consumes its result projection.
bool CallLeafPureNode::is_unused() const {
  return proj_out_or_null(TypeFunc::Parms) == nullptr;
}

// A pure leaf call is dead when its control projection has been removed.
bool CallLeafPureNode::is_dead() const {
  return proj_out_or_null(TypeFunc::Control) == nullptr;
}
1347
1348 /* We make a tuple of the global input state + TOP for the output values.
1349 * We use this to delete a pure function that is not used: by replacing the call with
1350 * such a tuple, we let output Proj's idealization pick the corresponding input of the
1351 * pure call, so jumping over it, and effectively, removing the call from the graph.
1352 * This avoids doing the graph surgery manually, but leaves that to IGVN
1353 * that is specialized for doing that right. We need also tuple components for output
1354 * values of the function to respect the return arity, and in case there is a projection
1355 * that would pick an output (which shouldn't happen at the moment).
1356 */
TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const {
  // Transparently propagate input state but parameters
  TupleNode* tuple = TupleNode::make(
      tf()->range(),
      in(TypeFunc::Control),
      in(TypeFunc::I_O),
      in(TypeFunc::Memory),
      in(TypeFunc::FramePtr),
      in(TypeFunc::ReturnAdr));

  // And add TOPs for the return values
  for (uint i = TypeFunc::Parms; i < tf()->range()->cnt(); i++) {
    tuple->set_req(i, C->top());
  }

  return tuple;
}
1374
// Build a copy of this call attached at 'control' (defaults to this call's
// control input). I_O/Memory/ReturnAdr/FramePtr are set to top; the actual
// arguments are carried over unchanged.
CallLeafPureNode* CallLeafPureNode::inline_call_leaf_pure_node(Node* control) const {
  Node* top = Compile::current()->top();
  if (control == nullptr) {
    control = in(TypeFunc::Control);
  }

  CallLeafPureNode* call = new CallLeafPureNode(tf(), entry_point(), _name);
  call->init_req(TypeFunc::Control, control);
  call->init_req(TypeFunc::I_O, top);
  call->init_req(TypeFunc::Memory, top);
  call->init_req(TypeFunc::ReturnAdr, top);
  call->init_req(TypeFunc::FramePtr, top);
  // Copy the actual arguments.
  for (unsigned int i = 0; i < tf()->domain()->cnt() - TypeFunc::Parms; i++) {
    call->init_req(TypeFunc::Parms + i, in(TypeFunc::Parms + i));
  }

  return call;
}
1393
// Remove a pure leaf call whose result is unused (only during IGVN).
Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (is_dead()) {
    return nullptr;
  }

  // We need to wait until IGVN because during parsing, usages might still be missing
  // and we would remove the call immediately.
  if (can_reshape && is_unused()) {
    // The result is not used. We remove the call by replacing it with a tuple, that
    // is later disintegrated by the projections.
    return make_tuple_of_input_state_and_top_return_values(phase->C);
  }

  return CallRuntimeNode::Ideal(phase, can_reshape);
}
1409
1410 #ifndef PRODUCT
// Print "# <entry name>" followed by common call info.
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
1416 #endif
1417
1418 //=============================================================================
1419
// Store local value 'idx' into this safepoint's debug info, killing the low
// half of any long/double that previously occupied the (idx-1, idx) pair.
void SafePointNode::set_local(const JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
1435
1436 uint SafePointNode::size_of() const { return sizeof(*this); }
1437 bool SafePointNode::cmp( const Node &n ) const {
1448 }
1449 }
1450
1451
1452 //----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    // No precedence edges, hence no chained exception state.
    return nullptr;
  } else {
    // The chained exception safepoint hangs off the first precedence edge.
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
1470
1471 //------------------------------Identity---------------------------------------
1472 // Remove obviously duplicate safepoints
1473 Node* SafePointNode::Identity(PhaseGVN* phase) {
1474
1475 // If you have back to back safepoints, remove one
1476 if (in(TypeFunc::Control)->is_SafePoint()) {
1477 Node* out_c = unique_ctrl_out_or_null();
1478 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1479 // outer loop's safepoint could confuse removal of the outer loop.
1480 if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1481 return in(TypeFunc::Control);
1482 }
1483 }
1484
1485 // Transforming long counted loops requires a safepoint node. Do not
1486 // eliminate a safepoint until loop opts are over.
1487 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1488 Node *n0 = in(0)->in(0);
1602 }
1603
// Remove the root node's precedence edge to this safepoint, if present.
void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}
1611
1612 //============== SafePointScalarObjectNode ==============
1613
SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  // Only Allocate nodes and VectorBox nodes are accepted as the source.
  if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  // Constrain the input to the debug-info mask of its ideal register class.
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
1707 new_node = false;
1708 return (SafePointScalarMergeNode*)cached;
1709 }
1710 new_node = true;
1711 SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1712 sosn_map->Insert((void*)this, (void*)res);
1713 return res;
1714 }
1715
#ifndef PRODUCT
// Debug printing: the merge pointer input index and how many scalarized
// object candidates feed this merge (all inputs except in(0)).
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif
1721
1722 //=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

// Constructor for an object allocation. The node is registered as a macro
// node and later expanded by the macro expansion phase.
//   atype        - allocation call type
//   ctrl/mem/abio - control, memory and I/O state at the allocation point
//   size         - object size in bytes
//   klass_node   - klass of the object being allocated
//   initial_test - result of the initial slow-path test (may be top)
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  // Escape-analysis results are filled in later; start pessimistic.
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  // ReturnAdr/FramePtr are unused by the expanded allocation: use top.
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  // Array length inputs are top for a plain object allocation
  // (AllocateArrayNode fills them in).
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}
1749
// Decide whether the MemBar emitted after this allocation's initializer is
// redundant, based on bytecode escape analysis of the constructor.
void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr && initializer->is_object_initializer(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    // No escape analysis available: conservatively keep the MemBar.
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
// Build the ideal node producing the initial mark word for this allocation.
// With compact object headers the prototype header is loaded from the klass;
// otherwise the static prototype mark word constant is used.
Node *AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders) {
    // Load Klass::_prototype_header from the klass being allocated.
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(AddPNode::make_off_heap(klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, phase->type(proto_adr)->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}
1776
1777 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1778 // CastII, if appropriate. If we are not allowed to create new nodes, and
1779 // a CastII is appropriate, return null.
1780 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
1781 Node *length = in(AllocateNode::ALength);
1782 assert(length != nullptr, "length is not null");
1783
1784 const TypeInt* length_type = phase->find_int_type(length);
1785 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1786
1787 if (ary_type != nullptr && length_type != nullptr) {
1788 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
2150
// Compact debug printing: just the elimination-kind name of this lock.
void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
2154 #endif
2155
2156 //=============================================================================
2157 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2158
2159 // perform any generic optimizations first (returns 'this' or null)
2160 Node *result = SafePointNode::Ideal(phase, can_reshape);
2161 if (result != nullptr) return result;
2162 // Don't bother trying to transform a dead node
2163 if (in(0) && in(0)->is_top()) return nullptr;
2164
2165 // Now see if we can optimize away this lock. We don't actually
2166 // remove the locking here, we simply set the _eliminate flag which
2167 // prevents macro expansion from expanding the lock. Since we don't
2168 // modify the graph, the value returned from this function is the
2169 // one computed above.
2170 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
2171 //
2172 // If we are locking an non-escaped object, the lock/unlock is unnecessary
2173 //
2174 ConnectionGraph *cgr = phase->C->congraph();
2175 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2176 assert(!is_eliminated() || is_coarsened(), "sanity");
2177 // The lock could be marked eliminated by lock coarsening
2178 // code during first IGVN before EA. Replace coarsened flag
2179 // to eliminate all associated locks/unlocks.
2180 #ifdef ASSERT
2181 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2182 #endif
2183 this->set_non_esc_obj();
2184 return result;
2185 }
2186
2187 if (!phase->C->do_locks_coarsening()) {
2188 return result; // Compiling without locks coarsening
2189 }
2190 //
2351 }
2352
2353 //=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
// Try to mark this unlock as eliminable. The node itself is not removed here;
// macro expansion later skips expanding eliminated unlocks.
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}
2389
2390 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2391 if (C == nullptr) {
2431 }
2432 // unrelated
2433 return false;
2434 }
2435
2436 if (dest_t->isa_aryptr()) {
2437 // arraycopy or array clone
2438 if (t_oop->isa_instptr()) {
2439 return false;
2440 }
2441 if (!t_oop->isa_aryptr()) {
2442 return true;
2443 }
2444
2445 const Type* elem = dest_t->is_aryptr()->elem();
2446 if (elem == Type::BOTTOM) {
2447 // An array but we don't know what elements are
2448 return true;
2449 }
2450
2451 dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
2452 uint dest_alias = phase->C->get_alias_index(dest_t);
2453 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2454
2455 return dest_alias == t_oop_alias;
2456 }
2457
2458 return true;
2459 }
2460
2461 PowDNode::PowDNode(Compile* C, Node* base, Node* exp)
2462 : CallLeafPureNode(
2463 OptoRuntime::Math_DD_D_Type(),
2464 StubRoutines::dpow() != nullptr ? StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
2465 "pow") {
2466 add_flag(Flag_is_macro);
2467 C->add_macro_node(this);
2468
2469 init_req(TypeFunc::Parms + 0, base);
2470 init_req(TypeFunc::Parms + 1, C->top()); // double slot padding
2471 init_req(TypeFunc::Parms + 2, exp);
2497 // i.e., pow(x, +/-0.0D) => 1.0
2498 if (e == 0.0) { // true for both -0.0 and +0.0
2499 result_t = TypeD::ONE;
2500 }
2501
2502 // If the second argument is NaN, then the result is NaN.
2503 // i.e., pow(x, NaN) => NaN
2504 if (g_isnan(e)) {
2505 result_t = TypeD::make(NAN);
2506 }
2507 }
2508
2509 if (result_t != nullptr) {
2510 // We can't simply return a TypeD here, it must be a tuple type to be compatible with call nodes.
2511 const Type** fields = TypeTuple::fields(2);
2512 fields[TypeFunc::Parms + 0] = result_t;
2513 fields[TypeFunc::Parms + 1] = Type::HALF;
2514 return TypeTuple::make(TypeFunc::Parms + 2, fields);
2515 }
2516
2517 return tf()->range();
2518 }
2519
2520 Node* PowDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2521 if (!can_reshape) {
2522 return nullptr; // wait for igvn
2523 }
2524
2525 PhaseIterGVN* igvn = phase->is_IterGVN();
2526 Node* base = this->base();
2527 Node* exp = this->exp();
2528
2529 const Type* t_exp = phase->type(exp);
2530 const TypeD* exp_con = t_exp->isa_double_constant();
2531
2532 // Special cases when only the exponent is known:
2533 if (exp_con != nullptr) {
2534 double e = exp_con->getd();
2535
2536 // If the second argument is 1.0, then the result is the same as the first argument.
2537 // i.e., pow(x, 1.0) => x
2584
2585 igvn->C->set_has_split_ifs(true); // Has chance for split-if optimization
2586
2587 return make_tuple_of_input_state_and_result(igvn, phi, region);
2588 }
2589 }
2590
2591 return CallLeafPureNode::Ideal(phase, can_reshape);
2592 }
2593
// We can't simply have Ideal() returning a Con or MulNode since the users are still expecting a Call node, but we could
// produce a tuple that follows the same pattern so users can still get control, io, memory, etc..
TupleNode* PowDNode::make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control) {
  if (control == nullptr) {
    // Default to this call's incoming control.
    control = in(TypeFunc::Control);
  }

  Compile* C = phase->C;
  // The call is being replaced, so it no longer needs macro expansion.
  C->remove_macro_node(this);
  // Pass the input state straight through; only the result slot changes.
  // The trailing top fills the second (HALF) slot of the double result.
  TupleNode* tuple = TupleNode::make(
      tf()->range(),
      control,
      in(TypeFunc::I_O),
      in(TypeFunc::Memory),
      in(TypeFunc::FramePtr),
      in(TypeFunc::ReturnAdr),
      result,
      C->top());
  return tuple;
}
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/bcEscapeAnalyzer.hpp"
26 #include "ci/ciFlatArrayKlass.hpp"
27 #include "ci/ciSymbols.hpp"
28 #include "code/vmreg.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/c2/barrierSetC2.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/convertnode.hpp"
38 #include "opto/escape.hpp"
39 #include "opto/inlinetypenode.hpp"
40 #include "opto/locknode.hpp"
41 #include "opto/machnode.hpp"
42 #include "opto/matcher.hpp"
43 #include "opto/movenode.hpp"
44 #include "opto/parse.hpp"
45 #include "opto/regalloc.hpp"
46 #include "opto/regmask.hpp"
47 #include "opto/rootnode.hpp"
48 #include "opto/runtime.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "runtime/stubRoutines.hpp"
52 #include "utilities/powerOfTwo.hpp"
53
54 // Portions of code courtesy of Clifford Click
55
56 // Optimization - Graph Style
57
//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
// The type of a StartNode is its incoming-parameter tuple.
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif
68
//------------------------------Ideal------------------------------------------
// Only transformation: prune a dead enclosing region, if any.
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
73
//------------------------------calling_convention-----------------------------
// Incoming parameters use the standard Java calling convention.
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
78
//------------------------------Registers--------------------------------------
// StartNode has no inputs, hence no input register constraints.
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::EMPTY;
}
83
//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Non-register state: unmatched projections with an empty mask.
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      // A real incoming parameter: look up its register(s) from the
      // calling convention computed by the matcher.
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}
109
//=============================================================================
// Printable names for the fixed projections below TypeFunc::Parms.
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};
114
115 #ifndef PRODUCT
// Debug printing: fixed projections print their names; real parameters
// print their index and (usually) their bottom type.
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}
125
126 void ParmNode::dump_compact_spec(outputStream *st) const {
127 if (_con < TypeFunc::Parms) {
128 st->print("%s", names[_con]);
129 } else {
477 if (cik->is_instance_klass()) {
478 cik->print_name_on(st);
479 iklass = cik->as_instance_klass();
480 } else if (cik->is_type_array_klass()) {
481 cik->as_array_klass()->base_element_type()->print_name_on(st);
482 st->print("[%d]", spobj->n_fields());
483 } else if (cik->is_obj_array_klass()) {
484 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
485 if (cie->is_instance_klass()) {
486 cie->print_name_on(st);
487 } else if (cie->is_type_array_klass()) {
488 cie->as_array_klass()->base_element_type()->print_name_on(st);
489 } else {
490 ShouldNotReachHere();
491 }
492 st->print("[%d]", spobj->n_fields());
493 int ndim = cik->as_array_klass()->dimension() - 1;
494 while (ndim-- > 0) {
495 st->print("[]");
496 }
497 } else {
498 assert(false, "unexpected type %s", cik->name()->as_utf8());
499 }
500 st->print("={");
501 uint nf = spobj->n_fields();
502 if (nf > 0) {
503 uint first_ind = spobj->first_index(mcall->jvms());
504 if (iklass != nullptr && iklass->is_inlinetype()) {
505 Node* null_marker = mcall->in(first_ind++);
506 if (!null_marker->is_top()) {
507 st->print(" [null marker");
508 format_helper(regalloc, st, null_marker, ":", -1, nullptr);
509 }
510 }
511 Node* fld_node = mcall->in(first_ind);
512 if (iklass != nullptr) {
513 st->print(" [");
514 iklass->nonstatic_field_at(0)->print_name_on(st);
515 format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
516 } else {
517 format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
518 }
519 for (uint j = 1; j < nf; j++) {
520 fld_node = mcall->in(first_ind+j);
521 if (iklass != nullptr) {
522 st->print(", [");
523 iklass->nonstatic_field_at(j)->print_name_on(st);
524 format_helper(regalloc, st, fld_node, ":", j, &scobjs);
525 } else {
526 format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
527 }
528 }
529 }
530 st->print(" }");
531 }
532 }
533 st->cr();
534 if (caller() != nullptr) caller()->format(regalloc, n, st);
535 }
536
537
538 void JVMState::dump_spec(outputStream *st) const {
539 if (_method != nullptr) {
540 bool printed = false;
541 if (!Verbose) {
542 // The JVMS dumps make really, really long lines.
543 // Take out the most boring parts, which are the package prefixes.
738 tf()->dump_on(st);
739 }
740 if (_cnt != COUNT_UNKNOWN) {
741 st->print(" C=%f", _cnt);
742 }
743 const Node* const klass_node = in(KlassNode);
744 if (klass_node != nullptr) {
745 const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr();
746
747 if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) {
748 st->print(" allocationKlass:");
749 klass_ptr->exact_klass()->print_name_on(st);
750 }
751 }
752 if (jvms() != nullptr) {
753 jvms()->dump_spec(st);
754 }
755 }
756 #endif
757
// A call's type is its (calling-convention-aware) return tuple.
const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP; // dead control means a dead call
  }
  return tf()->range_cc();
}
765
//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
778
779
//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple* range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (tf()->returns_inline_type_as_fields()) {
      // The call returns multiple values (inline type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    } else {
      if (con == TypeFunc::Parms) {
        // Single return value: mask is the conventional return register(s).
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = Opcode() == Op_CallLeafVector
          ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
          : match->c_return_value(ideal_reg);
        RegMask rm = RegMask(regs.first());

        if (Opcode() == Op_CallLeafVector) {
          // If the return is in vector, compute appropriate regmask taking into account the whole range
          if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
            if(OptoReg::is_valid(regs.second())) {
              for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
                rm.insert(r);
              }
            }
          }
        }

        if (OptoReg::is_valid(regs.second())) {
          rm.insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        // Second slot of a two-word (long/double) return: no register.
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::EMPTY, (uint)OptoReg::Bad);
      }
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}
837
// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}
842
843 //
844 // Determine whether the call could modify the field of the specified
845 // instance at the specified offset.
846 //
847 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
848 assert((t_oop != nullptr), "sanity");
849 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
850 const TypeTuple* args = _tf->domain_sig();
851 Node* dest = nullptr;
852 // Stubs that can be called once an ArrayCopyNode is expanded have
853 // different signatures. Look for the second pointer argument,
854 // that is the destination of the copy.
855 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
856 if (args->field_at(i)->isa_ptr()) {
857 j++;
858 if (j == 2) {
859 dest = in(i);
860 break;
861 }
862 }
863 }
864 guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
865 if (phase->type(dest)->isa_rawptr()) {
866 // may happen for an arraycopy that initializes a newly allocated object. Conservatively return true;
867 return true;
868 }
869 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
870 return true;
883 Node* proj = proj_out_or_null(TypeFunc::Parms);
884 if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
885 return false;
886 }
887 }
888 if (is_CallJava() && as_CallJava()->method() != nullptr) {
889 ciMethod* meth = as_CallJava()->method();
890 if (meth->is_getter()) {
891 return false;
892 }
893 // May modify (by reflection) if an boxing object is passed
894 // as argument or returned.
895 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
896 if (proj != nullptr) {
897 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
898 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
899 (inst_t->instance_klass() == boxing_klass))) {
900 return true;
901 }
902 }
903 const TypeTuple* d = tf()->domain_cc();
904 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
905 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
906 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
907 (inst_t->instance_klass() == boxing_klass))) {
908 return true;
909 }
910 }
911 return false;
912 }
913 }
914 return true;
915 }
916
917 // Does this call have a direct reference to n other than debug information?
918 bool CallNode::has_non_debug_use(Node* n) {
919 const TypeTuple* d = tf()->domain_cc();
920 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
921 if (in(i) == n) {
922 return true;
923 }
924 }
925 return false;
926 }
927
928 bool CallNode::has_debug_use(Node* n) {
929 if (jvms() != nullptr) {
930 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
931 if (in(i) == n) {
932 return true;
933 }
934 }
935 }
936 return false;
937 }
938
// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns null if there is no one.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  // Look at the uses of the result projection.
  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}
968
969
970 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) const {
971 uint max_res = TypeFunc::Parms-1;
972 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
973 ProjNode *pn = fast_out(i)->as_Proj();
974 max_res = MAX2(max_res, pn->_con);
975 }
976
977 assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
978
979 uint projs_size = sizeof(CallProjections);
980 if (max_res > TypeFunc::Parms) {
981 projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
982 }
983 char* projs_storage = resource_allocate_bytes(projs_size);
984 CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
985
986 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
987 ProjNode *pn = fast_out(i)->as_Proj();
988 if (pn->outcnt() == 0) continue;
989 switch (pn->_con) {
990 case TypeFunc::Control:
991 {
992 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
993 projs->fallthrough_proj = pn;
994 const Node* cn = pn->unique_ctrl_out_or_null();
995 if (cn != nullptr && cn->is_Catch()) {
996 ProjNode *cpn = nullptr;
997 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
998 cpn = cn->fast_out(k)->as_Proj();
999 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
1000 if (cpn->_con == CatchProjNode::fall_through_index)
1001 projs->fallthrough_catchproj = cpn;
1002 else {
1003 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
1004 projs->catchall_catchproj = cpn;
1010 case TypeFunc::I_O:
1011 if (pn->_is_io_use)
1012 projs->catchall_ioproj = pn;
1013 else
1014 projs->fallthrough_ioproj = pn;
1015 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
1016 Node* e = pn->out(j);
1017 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
1018 assert(projs->exobj == nullptr, "only one");
1019 projs->exobj = e;
1020 }
1021 }
1022 break;
1023 case TypeFunc::Memory:
1024 if (pn->_is_io_use)
1025 projs->catchall_memproj = pn;
1026 else
1027 projs->fallthrough_memproj = pn;
1028 break;
1029 case TypeFunc::Parms:
1030 projs->resproj[0] = pn;
1031 break;
1032 default:
1033 assert(pn->_con <= max_res, "unexpected projection from allocation node.");
1034 projs->resproj[pn->_con-TypeFunc::Parms] = pn;
1035 break;
1036 }
1037 }
1038
1039 // The resproj may not exist because the result could be ignored
1040 // and the exception object may not exist if an exception handler
1041 // swallows the exception but all the other must exist and be found.
1042 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
1043 assert(!do_asserts || projs->fallthrough_proj != nullptr, "must be found");
1044 assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
1045 assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
1046 assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
1047 assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
1048 if (separate_io_proj) {
1049 assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
1050 assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
1051 }
1052 return projs;
1053 }
1054
// Generic call transformation: only sanity-checks the attached late-inline
// generator, then defers to SafePointNode::Ideal.
Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}
1066
1067 bool CallNode::is_call_to_arraycopystub() const {
1068 if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
1069 return true;
1070 }
1071 return false;
1072 }
1073
1074 bool CallNode::is_call_to_multianewarray_stub() const {
1075 if (_name != nullptr &&
1076 strstr(_name, "multianewarray") != nullptr &&
1077 strstr(_name, "C2 runtime") != nullptr) {
1078 return true;
1079 }
1080 return false;
1081 }
1082
//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
// Value-numbering equality: same call plus same target method and
// symbolic-info override flag.
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}
1090
// Copy the debug-info inputs (locals, stack, monitors, scalar objects) from
// sfpt onto this call, rebasing all JVMState offsets for this call's
// argument count.
void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain_sig()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    // Shift every debug-info offset by the difference in argument counts.
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
1129
#ifdef ASSERT
// Debug-only check that the resolved callee is consistent with the symbolic
// reference at the call's bytecode. Always returns true (failures assert).
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
  if (Arguments::is_valhalla_enabled() && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
    // Calls emitted for the acmp substitutability test have no symbolic
    // reference to validate against.
    return true;
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif
1148
1149 #ifndef PRODUCT
1150 void CallJavaNode::dump_spec(outputStream* st) const {
1151 if( _method ) _method->print_short_name(st);
1152 CallNode::dump_spec(st);
1153 }
1154
1155 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1156 if (_method) {
1157 _method->print_short_name(st);
1158 } else {
1161 }
1162 #endif
1163
1164 void CallJavaNode::register_for_late_inline() {
1165 if (generator() != nullptr) {
1166 Compile::current()->prepend_late_inline(generator());
1167 set_generator(nullptr);
1168 } else {
1169 assert(false, "repeated inline attempt");
1170 }
1171 }
1172
1173 //=============================================================================
1174 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1175 bool CallStaticJavaNode::cmp( const Node &n ) const {
1176 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1177 return CallJavaNode::cmp(call);
1178 }
1179
1180 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1181 if (can_reshape && uncommon_trap_request() != 0) {
1182 PhaseIterGVN* igvn = phase->is_IterGVN();
1183 if (remove_unknown_flat_array_load(igvn, control(), memory(), in(TypeFunc::Parms))) {
1184 if (!control()->is_Region()) {
1185 igvn->replace_input_of(this, 0, phase->C->top());
1186 }
1187 return this;
1188 }
1189 }
1190
1191 // Try to replace the runtime call to the substitutability test emitted by acmp if (at least) one operand is a known type
1192 if (can_reshape && !control()->is_top() && method() != nullptr && method()->holder() == phase->C->env()->ValueObjectMethods_klass() &&
1193 (method()->name() == ciSymbols::isSubstitutable_name())) {
1194 Node* left = in(TypeFunc::Parms);
1195 Node* right = in(TypeFunc::Parms + 1);
1196 if (!left->is_top() && !right->is_top() && (left->is_InlineType() || right->is_InlineType())) {
1197 if (!left->is_InlineType()) {
1198 swap(left, right);
1199 }
1200 InlineTypeNode* vt = left->as_InlineType();
1201
1202 // Check if the field layout can be optimized
1203 if (vt->can_emit_substitutability_check(right)) {
1204 PhaseIterGVN* igvn = phase->is_IterGVN();
1205
1206 Node* ctrl = control();
1207 RegionNode* region = new RegionNode(1);
1208 Node* phi = new PhiNode(region, TypeInt::POS);
1209
1210 Node* base = right;
1211 Node* ptr = right;
1212 if (!base->is_InlineType()) {
1213 // Parse time checks guarantee that both operands are non-null and have the same type
1214 base = igvn->register_new_node_with_optimizer(new CheckCastPPNode(ctrl, base, vt->bottom_type()));
1215 ptr = base;
1216 }
1217 // Emit IR for field-wise comparison
1218 vt->check_substitutability(igvn, region, phi, &ctrl, in(TypeFunc::Memory), base, ptr);
1219
1220 // Equals
1221 region->add_req(ctrl);
1222 phi->add_req(igvn->intcon(1));
1223
1224 ctrl = igvn->register_new_node_with_optimizer(region);
1225 Node* res = igvn->register_new_node_with_optimizer(phi);
1226
1227 // Kill exception projections and return a tuple that will replace the call
1228 CallProjections* projs = extract_projections(false /*separate_io_proj*/);
1229 if (projs->fallthrough_catchproj != nullptr) {
1230 igvn->replace_node(projs->fallthrough_catchproj, ctrl);
1231 }
1232 if (projs->catchall_memproj != nullptr) {
1233 igvn->replace_node(projs->catchall_memproj, igvn->C->top());
1234 }
1235 if (projs->catchall_ioproj != nullptr) {
1236 igvn->replace_node(projs->catchall_ioproj, igvn->C->top());
1237 }
1238 if (projs->catchall_catchproj != nullptr) {
1239 igvn->replace_node(projs->catchall_catchproj, igvn->C->top());
1240 }
1241 return TupleNode::make(tf()->range_cc(), ctrl, i_o(), memory(), frameptr(), returnadr(), res);
1242 }
1243 }
1244 }
1245
1246 CallGenerator* cg = generator();
1247 if (can_reshape && cg != nullptr) {
1248 if (cg->is_mh_late_inline()) {
1249 assert(IncrementalInlineMH, "required");
1250 assert(cg->call_node() == this, "mismatch");
1251 assert(cg->method()->is_method_handle_intrinsic(), "required");
1252
1253 // Check whether this MH handle call becomes a candidate for inlining.
1254 ciMethod* callee = cg->method();
1255 vmIntrinsics::ID iid = callee->intrinsic_id();
1256 if (iid == vmIntrinsics::_invokeBasic) {
1257 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1258 register_for_late_inline();
1259 }
1260 } else if (iid == vmIntrinsics::_linkToNative) {
1261 // never retry
1262 } else {
1263 assert(callee->has_member_arg(), "wrong type of call?");
1264 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1265 register_for_late_inline();
1286
1287 //----------------------------uncommon_trap_request----------------------------
1288 // If this is an uncommon trap, return the request code, else zero.
1289 int CallStaticJavaNode::uncommon_trap_request() const {
1290 return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
1291 }
1292 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1293 #ifndef PRODUCT
1294 if (!(call->req() > TypeFunc::Parms &&
1295 call->in(TypeFunc::Parms) != nullptr &&
1296 call->in(TypeFunc::Parms)->is_Con() &&
1297 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1298 assert(in_dump() != 0, "OK if dumping");
1299 tty->print("[bad uncommon trap]");
1300 return 0;
1301 }
1302 #endif
1303 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1304 }
1305
// Split if can cause the flat array branch of an array load with unknown type (see
// Parse::array_load) to end in an uncommon trap. In that case, the call to
// 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState.
// 'this' is the uncommon trap call; 'ctl'/'mem' are the control and memory state
// being walked backwards; 'unc_arg' is the trap-request constant to reuse.
// Returns true if a 'load_unknown_inline' call was replaced.
bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
  if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
    return false;
  }
  if (ctl->is_Region()) {
    // Control merges here: recurse into each region input, first splitting the
    // merged memory state through the region's phis so each path is examined
    // with the memory valid on that path.
    bool res = false;
    for (uint i = 1; i < ctl->req(); i++) {
      MergeMemNode* mm = mem->clone()->as_MergeMem();
      for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
        Node* m = mms.memory();
        if (m->is_Phi() && m->in(0) == ctl) {
          mms.set_memory(m->in(i));
        }
      }
      if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
        res = true;
        if (!ctl->in(i)->is_Region()) {
          igvn->replace_input_of(ctl, i, igvn->C->top());
        }
      }
      // The cloned MergeMem was only needed for the recursive analysis.
      igvn->remove_dead_node(mm, PhaseIterGVN::NodeOrigin::Speculative);
    }
    return res;
  }
  // Verify the control flow is ok
  // Walk control up through projections/catches/membars looking for a
  // 'load_unknown_inline' runtime call; bail out on anything else.
  Node* call = ctl;
  MemBarNode* membar = nullptr;
  for (;;) {
    if (call == nullptr || call->is_top()) {
      return false;
    }
    if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
      call = call->in(0);
    } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
               call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
      // If there is no explicit flat array accesses in the compilation unit, there would be no
      // membar here
      if (call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar()) {
        membar = call->in(0)->in(0)->as_MemBar();
      }
      break;
    } else {
      return false;
    }
  }

  // Don't turn the call into a trap if this trap reason already fired too
  // often at this bci (would cause repeated deopt/recompile cycles).
  JVMState* jvms = call->jvms();
  if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
    return false;
  }

  Node* call_mem = call->in(TypeFunc::Memory);
  if (call_mem == nullptr || call_mem->is_top()) {
    return false;
  }
  if (!call_mem->is_MergeMem()) {
    // Wrap in a MergeMem so both states can be compared slice by slice below.
    call_mem = MergeMemNode::make(call_mem);
    igvn->register_new_node_with_optimizer(call_mem);
  }

  // Verify that there's no unexpected side effect
  // (i.e. each memory slice of 'mem' reaches the call's memory only through
  // projections, membars, and this very call).
  for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
    Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
    Node* m2 = mms2.memory2();

    // Bounded backward walk (at most 100 steps) from m1 toward m2.
    for (uint i = 0; i < 100; i++) {
      if (m1 == m2) {
        break;
      } else if (m1->is_Proj()) {
        m1 = m1->in(0);
      } else if (m1->is_MemBar()) {
        m1 = m1->in(TypeFunc::Memory);
      } else if (m1->Opcode() == Op_CallStaticJava &&
                 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
        if (m1 != call) {
          // A different load_unknown_inline call modifies memory in between: unsafe.
          if (call_mem->outcnt() == 0) {
            igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative);
          }
          return false;
        }
        break;
      } else if (m1->is_MergeMem()) {
        MergeMemNode* mm = m1->as_MergeMem();
        int idx = mms2.alias_idx();
        if (idx == Compile::AliasIdxBot) {
          m1 = mm->base_memory();
        } else {
          m1 = mm->memory_at(idx);
        }
      } else {
        // Unrecognized memory node: a side effect may exist between the call
        // and the trap point.
        if (call_mem->outcnt() == 0) {
          igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative);
        }
        return false;
      }
    }
  }
  // Drop the MergeMem wrapper if it was created just for this analysis.
  if (call_mem->outcnt() == 0) {
    igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative);
  }

  // Remove membar preceding the call
  if (membar != nullptr) {
    membar->remove(igvn);
  }

  // Build the replacement uncommon trap call, reusing the original call's
  // machine state and debug info.
  address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
  CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
  unc->init_req(TypeFunc::Control, call->in(0));
  unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
  unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
  unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
  unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
  unc->init_req(TypeFunc::Parms+0, unc_arg);
  unc->set_cnt(PROB_UNLIKELY_MAG(4));
  unc->copy_call_debug_info(igvn, call->as_CallStaticJava());

  // Replace the call with an uncommon trap
  // (killing its control input lets IGVN remove the now-dead call).
  igvn->replace_input_of(call, 0, igvn->C->top());

  igvn->register_new_node_with_optimizer(unc);

  // An uncommon trap never returns; terminate its control projection with Halt
  // and keep it alive from root.
  Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
  Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
  igvn->add_input_to(igvn->C->root(), halt);

  return true;
}
1437
1438
1439 #ifndef PRODUCT
1440 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1441 st->print("# Static ");
1442 if (_name != nullptr) {
1443 st->print("%s", _name);
1444 int trap_req = uncommon_trap_request();
1445 if (trap_req != 0) {
1446 char buf[100];
1447 st->print("(%s)",
1448 Deoptimization::format_trap_request(buf, sizeof(buf),
1449 trap_req));
1450 }
1451 st->print(" ");
1452 }
1453 CallJavaNode::dump_spec(st);
1454 }
1455
1456 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1457 if (_method) {
1458 _method->print_short_name(st);
1534 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1535 bool CallRuntimeNode::cmp( const Node &n ) const {
1536 CallRuntimeNode &call = (CallRuntimeNode&)n;
1537 return CallNode::cmp(call) && !strcmp(_name,call._name);
1538 }
1539 #ifndef PRODUCT
1540 void CallRuntimeNode::dump_spec(outputStream *st) const {
1541 st->print("# ");
1542 st->print("%s", _name);
1543 CallNode::dump_spec(st);
1544 }
1545 #endif
1546 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1547 bool CallLeafVectorNode::cmp( const Node &n ) const {
1548 CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1549 return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1550 }
1551
1552 //------------------------------calling_convention-----------------------------
1553 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1554 if (_entry_point == nullptr) {
1555 // The call to that stub is a special case: its inputs are
1556 // multiple values returned from a call and so it should follow
1557 // the return convention.
1558 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1559 return;
1560 }
1561 SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1562 }
1563
1564 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1565 #ifdef ASSERT
1566 assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1567 "return vector size must match");
1568 const TypeTuple* d = tf()->domain_sig();
1569 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1570 Node* arg = in(i);
1571 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1572 "vector argument size must match");
1573 }
1574 #endif
1575
1576 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1577 }
1578
1579 //=============================================================================
1580 //------------------------------calling_convention-----------------------------
1581
1582
1583 //=============================================================================
1584 bool CallLeafPureNode::is_unused() const {
1585 return proj_out_or_null(TypeFunc::Parms) == nullptr;
1586 }
1587
1588 bool CallLeafPureNode::is_dead() const {
1589 return proj_out_or_null(TypeFunc::Control) == nullptr;
1590 }
1591
1592 /* We make a tuple of the global input state + TOP for the output values.
1593 * We use this to delete a pure function that is not used: by replacing the call with
1594 * such a tuple, we let output Proj's idealization pick the corresponding input of the
1595 * pure call, so jumping over it, and effectively, removing the call from the graph.
1596 * This avoids doing the graph surgery manually, but leaves that to IGVN
1597 * that is specialized for doing that right. We need also tuple components for output
1598 * values of the function to respect the return arity, and in case there is a projection
1599 * that would pick an output (which shouldn't happen at the moment).
1600 */
1601 TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const {
1602 // Transparently propagate input state but parameters
1603 TupleNode* tuple = TupleNode::make(
1604 tf()->range_cc(),
1605 in(TypeFunc::Control),
1606 in(TypeFunc::I_O),
1607 in(TypeFunc::Memory),
1608 in(TypeFunc::FramePtr),
1609 in(TypeFunc::ReturnAdr));
1610
1611 // And add TOPs for the return values
1612 for (uint i = TypeFunc::Parms; i < tf()->range_cc()->cnt(); i++) {
1613 tuple->set_req(i, C->top());
1614 }
1615
1616 return tuple;
1617 }
1618
1619 CallLeafPureNode* CallLeafPureNode::inline_call_leaf_pure_node(Node* control) const {
1620 Node* top = Compile::current()->top();
1621 if (control == nullptr) {
1622 control = in(TypeFunc::Control);
1623 }
1624
1625 CallLeafPureNode* call = new CallLeafPureNode(tf(), entry_point(), _name);
1626 call->init_req(TypeFunc::Control, control);
1627 call->init_req(TypeFunc::I_O, top);
1628 call->init_req(TypeFunc::Memory, top);
1629 call->init_req(TypeFunc::ReturnAdr, top);
1630 call->init_req(TypeFunc::FramePtr, top);
1631 for (unsigned int i = 0; i < tf()->domain_cc()->cnt() - TypeFunc::Parms; i++) {
1632 call->init_req(TypeFunc::Parms + i, in(TypeFunc::Parms + i));
1633 }
1634
1635 return call;
1636 }
1637
1638 Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1639 if (is_dead()) {
1640 return nullptr;
1641 }
1642
1643 // We need to wait until IGVN because during parsing, usages might still be missing
1644 // and we would remove the call immediately.
1645 if (can_reshape && is_unused()) {
1646 // The result is not used. We remove the call by replacing it with a tuple, that
1647 // is later disintegrated by the projections.
1648 return make_tuple_of_input_state_and_top_return_values(phase->C);
1649 }
1650
1651 return CallRuntimeNode::Ideal(phase, can_reshape);
1652 }
1653
1654 #ifndef PRODUCT
1655 void CallLeafNode::dump_spec(outputStream *st) const {
1656 st->print("# ");
1657 st->print("%s", _name);
1658 CallNode::dump_spec(st);
1659 }
1660 #endif
1661
1662 uint CallLeafNoFPNode::match_edge(uint idx) const {
1663 // Null entry point is a special case for which the target is in a
1664 // register. Need to match that edge.
1665 return entry_point() == nullptr && idx == TypeFunc::Parms;
1666 }
1667
1668 //=============================================================================
1669
// Store 'c' into local slot 'idx' of the frame described by 'jvms'.
// If the slot currently holds TOP, it may be the second half of a two-slot
// long/double in slot idx-1; that half must be killed before the overwrite.
void SafePointNode::set_local(const JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      // Note: in(loc) must be read before set_req(loc, c) below overwrites it.
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
1685
1686 uint SafePointNode::size_of() const { return sizeof(*this); }
1687 bool SafePointNode::cmp( const Node &n ) const {
1698 }
1699 }
1700
1701
1702 //----------------------------next_exception-----------------------------------
1703 SafePointNode* SafePointNode::next_exception() const {
1704 if (len() == req()) {
1705 return nullptr;
1706 } else {
1707 Node* n = in(req());
1708 assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1709 return (SafePointNode*) n;
1710 }
1711 }
1712
1713
//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // NOTE(review): the low bit of _jvms->map() appears to be used as a tag that
  // exempts the map from the map()==this invariant — confirm against JVMState.
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }
  // Scalarize inline types in safepoint debug info.
  // Delay this until all inlining is over to avoid getting inconsistent debug info.
  if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
    // Walk only the debug-info inputs (locals/stack/monitors) of this safepoint.
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      Node* n = in(i)->uncast();
      if (n->is_InlineType()) {
        n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
      }
    }
  }
  return nullptr;
}
1733
1734 //------------------------------Identity---------------------------------------
1735 // Remove obviously duplicate safepoints
1736 Node* SafePointNode::Identity(PhaseGVN* phase) {
1737
1738 // If you have back to back safepoints, remove one
1739 if (in(TypeFunc::Control)->is_SafePoint()) {
1740 Node* out_c = unique_ctrl_out_or_null();
1741 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1742 // outer loop's safepoint could confuse removal of the outer loop.
1743 if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1744 return in(TypeFunc::Control);
1745 }
1746 }
1747
1748 // Transforming long counted loops requires a safepoint node. Do not
1749 // eliminate a safepoint until loop opts are over.
1750 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1751 Node *n0 = in(0)->in(0);
1865 }
1866
1867 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1868 assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1869 int nb = igvn->C->root()->find_prec_edge(this);
1870 if (nb != -1) {
1871 igvn->delete_precedence_of(igvn->C->root(), nb);
1872 }
1873 }
1874
//============== SafePointScalarObjectNode ==============

// Describes one scalar-replaced object in a safepoint's debug info.
// 'tp'          - the oop type of the eliminated object
// 'alloc'       - the Allocate (or VectorBox) node the object came from
// 'first_index' - index of the first field value in the owning SafePoint's debug info
// 'depth'       - nesting depth within the JVMState hierarchy
// 'n_fields'    - number of recorded field values
SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  // Only Allocate and VectorBox nodes are legal sources of a scalar-replaced object.
  if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}
1892
1893 // Do not allow value-numbering for SafePointScalarObject node.
1894 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1895 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1896 return (&n == this); // Always fail except on self
1897 }
1898
// Debug-info-only node: it never produces a value in a machine register.
uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}
1902
1903 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1904 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1905 }
1970 new_node = false;
1971 return (SafePointScalarMergeNode*)cached;
1972 }
1973 new_node = true;
1974 SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1975 sosn_map->Insert((void*)this, (void*)res);
1976 return res;
1977 }
1978
1979 #ifndef PRODUCT
1980 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
1981 st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
1982 }
1983 #endif
1984
1985 //=============================================================================
1986 uint AllocateNode::size_of() const { return sizeof(*this); }
1987
// Construct an Allocate node. Allocation is expanded later (macro expansion),
// so the node is registered as a macro node with the compile.
// 'initial_test' is the slow-path test input; 'inline_type_node' carries the
// inline-type payload used by Valhalla allocations (may be null/top otherwise
// — TODO confirm against callers).
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node,
                           Node* initial_test,
                           InlineTypeNode* inline_type_node)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  // Escape-analysis/elimination facts are computed later; start pessimistic.
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control , ctrl );
  init_req( TypeFunc::I_O , abio );
  init_req( TypeFunc::Memory , mem );
  // Frame state is irrelevant for the allocation macro node.
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize , size);
  init_req( KlassNode , klass_node);
  init_req( InitialTest , initial_test);
  // Array-specific slots stay TOP for a plain object allocation.
  init_req( ALength , topnode);
  init_req( ValidLengthTest , topnode);
  init_req( InlineType , inline_type_node);
  // DefaultValue defaults to nullptr
  // RawDefaultValue defaults to nullptr
  C->add_macro_node(this);
}
2017
2018 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
2019 {
2020 assert(initializer != nullptr &&
2021 (initializer->is_object_constructor() || initializer->is_class_initializer()),
2022 "unexpected initializer method");
2023 BCEscapeAnalyzer* analyzer = initializer->get_bcea();
2024 if (analyzer == nullptr) {
2025 return;
2026 }
2027
2028 // Allocation node is first parameter in its initializer
2029 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
2030 _is_allocation_MemBar_redundant = true;
2031 }
2032 }
2033
// Produce the initial mark word for this allocation.
// With compact object headers or Valhalla enabled, the prototype header is
// loaded from the klass at runtime; otherwise a constant prototype mark is used.
Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
    // Load Klass::prototype_header from the klass pointer.
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(AddPNode::make_with_base(phase->C->top(), klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, phase->type(proto_adr)->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}
2046
2047 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
2048 // CastII, if appropriate. If we are not allowed to create new nodes, and
2049 // a CastII is appropriate, return null.
2050 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
2051 Node *length = in(AllocateNode::ALength);
2052 assert(length != nullptr, "length is not null");
2053
2054 const TypeInt* length_type = phase->find_int_type(length);
2055 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
2056
2057 if (ary_type != nullptr && length_type != nullptr) {
2058 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
2420
2421 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
2422 st->print("%s", _kind_names[_kind]);
2423 }
2424 #endif
2425
2426 //=============================================================================
2427 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2428
2429 // perform any generic optimizations first (returns 'this' or null)
2430 Node *result = SafePointNode::Ideal(phase, can_reshape);
2431 if (result != nullptr) return result;
2432 // Don't bother trying to transform a dead node
2433 if (in(0) && in(0)->is_top()) return nullptr;
2434
2435 // Now see if we can optimize away this lock. We don't actually
2436 // remove the locking here, we simply set the _eliminate flag which
2437 // prevents macro expansion from expanding the lock. Since we don't
2438 // modify the graph, the value returned from this function is the
2439 // one computed above.
2440 const Type* obj_type = phase->type(obj_node());
2441 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2442 //
2443 // If we are locking an non-escaped object, the lock/unlock is unnecessary
2444 //
2445 ConnectionGraph *cgr = phase->C->congraph();
2446 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2447 assert(!is_eliminated() || is_coarsened(), "sanity");
2448 // The lock could be marked eliminated by lock coarsening
2449 // code during first IGVN before EA. Replace coarsened flag
2450 // to eliminate all associated locks/unlocks.
2451 #ifdef ASSERT
2452 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2453 #endif
2454 this->set_non_esc_obj();
2455 return result;
2456 }
2457
2458 if (!phase->C->do_locks_coarsening()) {
2459 return result; // Compiling without locks coarsening
2460 }
2461 //
2622 }
2623
2624 //=============================================================================
2625 uint UnlockNode::size_of() const { return sizeof(*this); }
2626
2627 //=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this unlock. We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock. Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  // can_reshape is true only during IGVN, when the connection graph (escape
  // analysis result) queried below is available.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
    //
    // If we are unlocking an non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}
2661
2662 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2663 if (C == nullptr) {
2703 }
2704 // unrelated
2705 return false;
2706 }
2707
2708 if (dest_t->isa_aryptr()) {
2709 // arraycopy or array clone
2710 if (t_oop->isa_instptr()) {
2711 return false;
2712 }
2713 if (!t_oop->isa_aryptr()) {
2714 return true;
2715 }
2716
2717 const Type* elem = dest_t->is_aryptr()->elem();
2718 if (elem == Type::BOTTOM) {
2719 // An array but we don't know what elements are
2720 return true;
2721 }
2722
2723 dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2724 t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2725 uint dest_alias = phase->C->get_alias_index(dest_t);
2726 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2727
2728 return dest_alias == t_oop_alias;
2729 }
2730
2731 return true;
2732 }
2733
2734 PowDNode::PowDNode(Compile* C, Node* base, Node* exp)
2735 : CallLeafPureNode(
2736 OptoRuntime::Math_DD_D_Type(),
2737 StubRoutines::dpow() != nullptr ? StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
2738 "pow") {
2739 add_flag(Flag_is_macro);
2740 C->add_macro_node(this);
2741
2742 init_req(TypeFunc::Parms + 0, base);
2743 init_req(TypeFunc::Parms + 1, C->top()); // double slot padding
2744 init_req(TypeFunc::Parms + 2, exp);
2770 // i.e., pow(x, +/-0.0D) => 1.0
2771 if (e == 0.0) { // true for both -0.0 and +0.0
2772 result_t = TypeD::ONE;
2773 }
2774
2775 // If the second argument is NaN, then the result is NaN.
2776 // i.e., pow(x, NaN) => NaN
2777 if (g_isnan(e)) {
2778 result_t = TypeD::make(NAN);
2779 }
2780 }
2781
2782 if (result_t != nullptr) {
2783 // We can't simply return a TypeD here, it must be a tuple type to be compatible with call nodes.
2784 const Type** fields = TypeTuple::fields(2);
2785 fields[TypeFunc::Parms + 0] = result_t;
2786 fields[TypeFunc::Parms + 1] = Type::HALF;
2787 return TypeTuple::make(TypeFunc::Parms + 2, fields);
2788 }
2789
2790 return tf()->range_cc();
2791 }
2792
2793 Node* PowDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2794 if (!can_reshape) {
2795 return nullptr; // wait for igvn
2796 }
2797
2798 PhaseIterGVN* igvn = phase->is_IterGVN();
2799 Node* base = this->base();
2800 Node* exp = this->exp();
2801
2802 const Type* t_exp = phase->type(exp);
2803 const TypeD* exp_con = t_exp->isa_double_constant();
2804
2805 // Special cases when only the exponent is known:
2806 if (exp_con != nullptr) {
2807 double e = exp_con->getd();
2808
2809 // If the second argument is 1.0, then the result is the same as the first argument.
2810 // i.e., pow(x, 1.0) => x
2857
2858 igvn->C->set_has_split_ifs(true); // Has chance for split-if optimization
2859
2860 return make_tuple_of_input_state_and_result(igvn, phi, region);
2861 }
2862 }
2863
2864 return CallLeafPureNode::Ideal(phase, can_reshape);
2865 }
2866
2867 // We can't simply have Ideal() returning a Con or MulNode since the users are still expecting a Call node, but we could
2868 // produce a tuple that follows the same pattern so users can still get control, io, memory, etc..
2869 TupleNode* PowDNode::make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control) {
2870 if (control == nullptr) {
2871 control = in(TypeFunc::Control);
2872 }
2873
2874 Compile* C = phase->C;
2875 C->remove_macro_node(this);
2876 TupleNode* tuple = TupleNode::make(
2877 tf()->range_cc(),
2878 control,
2879 in(TypeFunc::I_O),
2880 in(TypeFunc::Memory),
2881 in(TypeFunc::FramePtr),
2882 in(TypeFunc::ReturnAdr),
2883 result,
2884 C->top());
2885 return tuple;
2886 }
|