7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "ci/bcEscapeAnalyzer.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/c2/barrierSetC2.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "opto/callGenerator.hpp"
33 #include "opto/callnode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/escape.hpp"
37 #include "opto/locknode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/parse.hpp"
41 #include "opto/regalloc.hpp"
42 #include "opto/regmask.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/powerOfTwo.hpp"
47 #include "code/vmreg.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
54 uint StartNode::size_of() const { return sizeof(*this); }
55 bool StartNode::cmp( const Node &n ) const
56 { return _domain == ((StartNode&)n)._domain; }
57 const Type *StartNode::bottom_type() const { return _domain; }
58 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
59 #ifndef PRODUCT
60 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
61 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
62 #endif
63
64 //------------------------------Ideal------------------------------------------
65 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
66 return remove_dead_region(phase, can_reshape) ? this : nullptr;
67 }
68
69 //------------------------------calling_convention-----------------------------
70 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
71 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
72 }
73
74 //------------------------------Registers--------------------------------------
75 const RegMask &StartNode::in_RegMask(uint) const {
76 return RegMask::Empty;
77 }
78
79 //------------------------------match------------------------------------------
80 // Construct projections for incoming parameters, and their RegMask info
81 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
82 switch (proj->_con) {
83 case TypeFunc::Control:
84 case TypeFunc::I_O:
85 case TypeFunc::Memory:
86 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
87 case TypeFunc::FramePtr:
88 return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
89 case TypeFunc::ReturnAdr:
90 return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
91 case TypeFunc::Parms:
92 default: {
93 uint parm_num = proj->_con - TypeFunc::Parms;
94 const Type *t = _domain->field_at(proj->_con);
95 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
96 return new ConNode(Type::TOP);
97 uint ideal_reg = t->ideal_reg();
98 RegMask &rm = match->_calling_convention_mask[parm_num];
99 return new MachProjNode(this,proj->_con,rm,ideal_reg);
100 }
101 }
102 return nullptr;
103 }
104
105 //------------------------------StartOSRNode----------------------------------
106 // The method start node for an on-stack replacement adapter
107
108 //------------------------------osr_domain-----------------------------
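// An OSR entry receives a single incoming argument: the address of the OSR buffer
// into which the interpreter frame state (locals and monitors) has been packed.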
109 const TypeTuple *StartOSRNode::osr_domain() {
110 const Type **fields = TypeTuple::fields(2);
111 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
112
113 return TypeTuple::make(TypeFunc::Parms+1, fields);
114 }
115
116 //=============================================================================
117 const char * const ParmNode::names[TypeFunc::Parms+1] = {
118 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
119 };
120
121 #ifndef PRODUCT
122 void ParmNode::dump_spec(outputStream *st) const {
123 if( _con < TypeFunc::Parms ) {
124 st->print("%s", names[_con]);
125 } else {
126 st->print("Parm%d: ",_con-TypeFunc::Parms);
127 // Verbose and WizardMode dump bottom_type for all nodes
128 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
129 }
130 }
131
132 void ParmNode::dump_compact_spec(outputStream *st) const {
133 if (_con < TypeFunc::Parms) {
134 st->print("%s", names[_con]);
135 } else {
481 if (cik->is_instance_klass()) {
482 cik->print_name_on(st);
483 iklass = cik->as_instance_klass();
484 } else if (cik->is_type_array_klass()) {
485 cik->as_array_klass()->base_element_type()->print_name_on(st);
486 st->print("[%d]", spobj->n_fields());
487 } else if (cik->is_obj_array_klass()) {
488 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
489 if (cie->is_instance_klass()) {
490 cie->print_name_on(st);
491 } else if (cie->is_type_array_klass()) {
492 cie->as_array_klass()->base_element_type()->print_name_on(st);
493 } else {
494 ShouldNotReachHere();
495 }
496 st->print("[%d]", spobj->n_fields());
497 int ndim = cik->as_array_klass()->dimension() - 1;
498 while (ndim-- > 0) {
499 st->print("[]");
500 }
501 }
502 st->print("={");
503 uint nf = spobj->n_fields();
504 if (nf > 0) {
505 uint first_ind = spobj->first_index(mcall->jvms());
506 Node* fld_node = mcall->in(first_ind);
507 ciField* cifield;
508 if (iklass != nullptr) {
509 st->print(" [");
510 cifield = iklass->nonstatic_field_at(0);
511 cifield->print_name_on(st);
512 format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
513 } else {
514 format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
515 }
516 for (uint j = 1; j < nf; j++) {
517 fld_node = mcall->in(first_ind+j);
518 if (iklass != nullptr) {
519 st->print(", [");
520 cifield = iklass->nonstatic_field_at(j);
521 cifield->print_name_on(st);
522 format_helper(regalloc, st, fld_node, ":", j, &scobjs);
523 } else {
524 format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
525 }
699 if (i == TypeFunc::Parms) st->print("(");
700 Node* p = in(i);
701 if (p != nullptr) {
702 p->dump_idx(false, st, dc);
703 st->print(" ");
704 } else {
705 st->print("_ ");
706 }
707 }
708 st->print(")");
709 }
710
711 void CallNode::dump_spec(outputStream *st) const {
712 st->print(" ");
713 if (tf() != nullptr) tf()->dump_on(st);
714 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
715 if (jvms() != nullptr) jvms()->dump_spec(st);
716 }
717 #endif
718
719 const Type *CallNode::bottom_type() const { return tf()->range(); }
720 const Type* CallNode::Value(PhaseGVN* phase) const {
721 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
722 return tf()->range();
723 }
724
725 //------------------------------calling_convention-----------------------------
726 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
727 // Use the standard compiler calling convention
728 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
729 }
730
731
732 //------------------------------match------------------------------------------
733 // Construct projections for control, I/O, memory-fields, ..., and
734 // return result(s) along with their RegMask info
735 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
736 switch (proj->_con) {
737 case TypeFunc::Control:
738 case TypeFunc::I_O:
739 case TypeFunc::Memory:
740 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
741
742 case TypeFunc::Parms+1: // For LONG & DOUBLE returns
743 assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
744 // 2nd half of doubles and longs
745 return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
746
747 case TypeFunc::Parms: { // Normal returns
748 uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
749 OptoRegPair regs = Opcode() == Op_CallLeafVector
750 ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine
751 : is_CallRuntime()
752 ? match->c_return_value(ideal_reg) // Calls into C runtime
753 : match-> return_value(ideal_reg); // Calls into compiled Java code
754 RegMask rm = RegMask(regs.first());
755
756 if (Opcode() == Op_CallLeafVector) {
757 // If the return is in vector, compute appropriate regmask taking into account the whole range
758 if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
759 if(OptoReg::is_valid(regs.second())) {
760 for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
761 rm.Insert(r);
762 }
763 }
764 }
765 }
766
767 if( OptoReg::is_valid(regs.second()) )
768 rm.Insert( regs.second() );
769 return new MachProjNode(this,proj->_con,rm,ideal_reg);
770 }
771
772 case TypeFunc::ReturnAdr:
773 case TypeFunc::FramePtr:
774 default:
775 ShouldNotReachHere();
776 }
777 return nullptr;
778 }
779
780 // Do we Match on this edge index or not? Match no edges
781 uint CallNode::match_edge(uint idx) const {
782 return 0;
783 }
784
785 //
786 // Determine whether the call could modify the field of the specified
787 // instance at the specified offset.
788 //
789 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
790 assert((t_oop != nullptr), "sanity");
791 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
792 const TypeTuple* args = _tf->domain();
793 Node* dest = nullptr;
794 // Stubs that can be called once an ArrayCopyNode is expanded have
795 // different signatures. Look for the second pointer argument,
796 // which is the destination of the copy.
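    // For instance, a disjoint arraycopy stub takes (src, dest, count) while the generic
    // stub takes (src, src_pos, dest, dest_pos, length); in both cases the destination
    // is the second pointer argument.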
797 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
798 if (args->field_at(i)->isa_ptr()) {
799 j++;
800 if (j == 2) {
801 dest = in(i);
802 break;
803 }
804 }
805 }
806 guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
807 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
808 return true;
809 }
810 return false;
811 }
812 if (t_oop->is_known_instance()) {
821 Node* proj = proj_out_or_null(TypeFunc::Parms);
822 if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
823 return false;
824 }
825 }
826 if (is_CallJava() && as_CallJava()->method() != nullptr) {
827 ciMethod* meth = as_CallJava()->method();
828 if (meth->is_getter()) {
829 return false;
830 }
831 // May modify (by reflection) if a boxing object is passed
832 // as an argument or returned.
833 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
834 if (proj != nullptr) {
835 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
836 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
837 (inst_t->instance_klass() == boxing_klass))) {
838 return true;
839 }
840 }
841 const TypeTuple* d = tf()->domain();
842 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
843 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
844 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
845 (inst_t->instance_klass() == boxing_klass))) {
846 return true;
847 }
848 }
849 return false;
850 }
851 }
852 return true;
853 }
854
855 // Does this call have a direct reference to n other than debug information?
856 bool CallNode::has_non_debug_use(Node *n) {
857 const TypeTuple * d = tf()->domain();
858 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
859 Node *arg = in(i);
860 if (arg == n) {
861 return true;
862 }
863 }
864 return false;
865 }
866
867 // Returns the unique CheckCastPP of a call,
868 // or 'this' if there are several CheckCastPPs or unexpected uses,
869 // or null if there is none.
870 Node *CallNode::result_cast() {
871 Node *cast = nullptr;
872
873 Node *p = proj_out_or_null(TypeFunc::Parms);
874 if (p == nullptr)
875 return nullptr;
876
877 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
878 Node *use = p->fast_out(i);
879 if (use->is_CheckCastPP()) {
880 if (cast != nullptr) {
881 return this; // more than 1 CheckCastPP
882 }
883 cast = use;
884 } else if (!use->is_Initialize() &&
885 !use->is_AddP() &&
886 use->Opcode() != Op_MemBarStoreStore) {
887 // Expected uses are restricted to a CheckCastPP, an Initialize
888 // node, a MemBarStoreStore (clone) and AddP nodes. If we
889 // encounter any other use (a Phi node can be seen in rare
890 // cases) return this to prevent incorrect optimizations.
891 return this;
892 }
893 }
894 return cast;
895 }
896
897
898 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
899 projs->fallthrough_proj = nullptr;
900 projs->fallthrough_catchproj = nullptr;
901 projs->fallthrough_ioproj = nullptr;
902 projs->catchall_ioproj = nullptr;
903 projs->catchall_catchproj = nullptr;
904 projs->fallthrough_memproj = nullptr;
905 projs->catchall_memproj = nullptr;
906 projs->resproj = nullptr;
907 projs->exobj = nullptr;
908
909 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
910 ProjNode *pn = fast_out(i)->as_Proj();
911 if (pn->outcnt() == 0) continue;
912 switch (pn->_con) {
913 case TypeFunc::Control:
914 {
915 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
916 projs->fallthrough_proj = pn;
917 const Node* cn = pn->unique_ctrl_out_or_null();
918 if (cn != nullptr && cn->is_Catch()) {
919 ProjNode *cpn = nullptr;
920 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
921 cpn = cn->fast_out(k)->as_Proj();
922 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
923 if (cpn->_con == CatchProjNode::fall_through_index)
924 projs->fallthrough_catchproj = cpn;
925 else {
926 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
927 projs->catchall_catchproj = cpn;
933 case TypeFunc::I_O:
934 if (pn->_is_io_use)
935 projs->catchall_ioproj = pn;
936 else
937 projs->fallthrough_ioproj = pn;
938 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
939 Node* e = pn->out(j);
940 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
941 assert(projs->exobj == nullptr, "only one");
942 projs->exobj = e;
943 }
944 }
945 break;
946 case TypeFunc::Memory:
947 if (pn->_is_io_use)
948 projs->catchall_memproj = pn;
949 else
950 projs->fallthrough_memproj = pn;
951 break;
952 case TypeFunc::Parms:
953 projs->resproj = pn;
954 break;
955 default:
956 assert(false, "unexpected projection from allocation node.");
957 }
958 }
959
960 // The resproj may not exist because the result could be ignored
961 // and the exception object may not exist if an exception handler
962 // swallows the exception, but all the others must exist and be found.
963 assert(projs->fallthrough_proj != nullptr, "must be found");
964 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
965 assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
966 assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
967 assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
968 assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
969 if (separate_io_proj) {
970 assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
971 assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
972 }
973 }
974
975 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
976 #ifdef ASSERT
977 // Validate attached generator
978 CallGenerator* cg = generator();
979 if (cg != nullptr) {
980 assert((is_CallStaticJava() && cg->is_mh_late_inline()) ||
981 (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
982 }
983 #endif // ASSERT
984 return SafePointNode::Ideal(phase, can_reshape);
985 }
986
987 bool CallNode::is_call_to_arraycopystub() const {
988 if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
989 return true;
990 }
991 return false;
992 }
993
994 //=============================================================================
995 uint CallJavaNode::size_of() const { return sizeof(*this); }
996 bool CallJavaNode::cmp( const Node &n ) const {
997 CallJavaNode &call = (CallJavaNode&)n;
998 return CallNode::cmp(call) && _method == call._method &&
999 _override_symbolic_info == call._override_symbolic_info;
1000 }
1001
1002 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1003 // Copy debug information and adjust JVMState information
1004 uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
1005 uint new_dbg_start = tf()->domain()->cnt();
1006 int jvms_adj = new_dbg_start - old_dbg_start;
1007 assert (new_dbg_start == req(), "argument count mismatch");
1008 Compile* C = phase->C;
1009
1010 // SafePointScalarObject node could be referenced several times in debug info.
1011 // Use Dict to record cloned nodes.
1012 Dict* sosn_map = new Dict(cmpkey,hashkey);
1013 for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1014 Node* old_in = sfpt->in(i);
1015 // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1016 if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
1017 SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1018 bool new_node;
1019 Node* new_in = old_sosn->clone(sosn_map, new_node);
1020 if (new_node) { // New node?
1021 new_in->set_req(0, C->root()); // reset control edge
1022 new_in = phase->transform(new_in); // Register new node.
1023 }
1024 old_in = new_in;
1025 }
1026 add_req(old_in);
1027 }
1028
1029 // JVMS may be shared so clone it before we modify it
1030 set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
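  // Shift every JVMState offset by jvms_adj: the debug info now follows this call's
  // arguments, which may differ in number from the original safepoint's.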
1031 for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1032 jvms->set_map(this);
1033 jvms->set_locoff(jvms->locoff()+jvms_adj);
1034 jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1035 jvms->set_monoff(jvms->monoff()+jvms_adj);
1036 jvms->set_scloff(jvms->scloff()+jvms_adj);
1037 jvms->set_endoff(jvms->endoff()+jvms_adj);
1038 }
1039 }
1040
1041 #ifdef ASSERT
1042 bool CallJavaNode::validate_symbolic_info() const {
1043 if (method() == nullptr) {
1044 return true; // call into runtime or uncommon trap
1045 }
1046 ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1047 ciMethod* callee = method();
1048 if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1049 assert(override_symbolic_info(), "should be set");
1050 }
1051 assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1052 return true;
1053 }
1054 #endif
1055
1056 #ifndef PRODUCT
1057 void CallJavaNode::dump_spec(outputStream* st) const {
1058 if( _method ) _method->print_short_name(st);
1059 CallNode::dump_spec(st);
1060 }
1061
1062 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1063 if (_method) {
1064 _method->print_short_name(st);
1065 } else {
1066 st->print("<?>");
1067 }
1068 }
1069 #endif
1070
1071 //=============================================================================
1072 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1073 bool CallStaticJavaNode::cmp( const Node &n ) const {
1074 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1075 return CallJavaNode::cmp(call);
1076 }
1077
1078 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1079 CallGenerator* cg = generator();
1080 if (can_reshape && cg != nullptr) {
1081 assert(IncrementalInlineMH, "required");
1082 assert(cg->call_node() == this, "mismatch");
1083 assert(cg->is_mh_late_inline(), "not virtual");
1084
1085 // Check whether this method handle call becomes a candidate for inlining.
1086 ciMethod* callee = cg->method();
1087 vmIntrinsics::ID iid = callee->intrinsic_id();
1088 if (iid == vmIntrinsics::_invokeBasic) {
1089 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1090 phase->C->prepend_late_inline(cg);
1091 set_generator(nullptr);
1092 }
1093 } else if (iid == vmIntrinsics::_linkToNative) {
1094 // never retry
1095 } else {
1096 assert(callee->has_member_arg(), "wrong type of call?");
1097 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1098 phase->C->prepend_late_inline(cg);
1111
1112 //----------------------------uncommon_trap_request----------------------------
1113 // If this is an uncommon trap, return the request code, else zero.
1114 int CallStaticJavaNode::uncommon_trap_request() const {
1115 return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
1116 }
1117 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
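  // The trap request is passed to the uncommon trap call as a constant int in the
  // first parameter slot.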
1118 #ifndef PRODUCT
1119 if (!(call->req() > TypeFunc::Parms &&
1120 call->in(TypeFunc::Parms) != nullptr &&
1121 call->in(TypeFunc::Parms)->is_Con() &&
1122 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1123 assert(in_dump() != 0, "OK if dumping");
1124 tty->print("[bad uncommon trap]");
1125 return 0;
1126 }
1127 #endif
1128 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1129 }
1130
1131 #ifndef PRODUCT
1132 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1133 st->print("# Static ");
1134 if (_name != nullptr) {
1135 st->print("%s", _name);
1136 int trap_req = uncommon_trap_request();
1137 if (trap_req != 0) {
1138 char buf[100];
1139 st->print("(%s)",
1140 Deoptimization::format_trap_request(buf, sizeof(buf),
1141 trap_req));
1142 }
1143 st->print(" ");
1144 }
1145 CallJavaNode::dump_spec(st);
1146 }
1147
1148 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1149 if (_method) {
1150 _method->print_short_name(st);
1215 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1216 bool CallRuntimeNode::cmp( const Node &n ) const {
1217 CallRuntimeNode &call = (CallRuntimeNode&)n;
1218 return CallNode::cmp(call) && !strcmp(_name,call._name);
1219 }
1220 #ifndef PRODUCT
1221 void CallRuntimeNode::dump_spec(outputStream *st) const {
1222 st->print("# ");
1223 st->print("%s", _name);
1224 CallNode::dump_spec(st);
1225 }
1226 #endif
1227 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1228 bool CallLeafVectorNode::cmp( const Node &n ) const {
1229 CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1230 return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1231 }
1232
1233 //------------------------------calling_convention-----------------------------
1234 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1235 SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1236 }
1237
1238 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
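  // All arguments and the return value are vectors of _num_bits bits and are passed
  // in vector registers.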
1239 #ifdef ASSERT
1240 assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1241 "return vector size must match");
1242 const TypeTuple* d = tf()->domain();
1243 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1244 Node* arg = in(i);
1245 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1246 "vector argument size must match");
1247 }
1248 #endif
1249
1250 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1251 }
1252
1253 //=============================================================================
1254 //------------------------------calling_convention-----------------------------
1255
1256
1257 //=============================================================================
1258 #ifndef PRODUCT
1259 void CallLeafNode::dump_spec(outputStream *st) const {
1260 st->print("# ");
1261 st->print("%s", _name);
1262 CallNode::dump_spec(st);
1263 }
1264 #endif
1265
1266 //=============================================================================
1267
1268 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1269 assert(verify_jvms(jvms), "jvms must match");
1270 int loc = jvms->locoff() + idx;
1271 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1272 // If current local idx is top then local idx - 1 could
1273 // be a long/double that needs to be killed since top could
1274 // represent the 2nd half of the long/double.
1275 uint ideal = in(loc -1)->ideal_reg();
1276 if (ideal == Op_RegD || ideal == Op_RegL) {
1277 // set other (low index) half to top
1278 set_req(loc - 1, in(loc));
1279 }
1280 }
1281 set_req(loc, c);
1282 }
1283
1284 uint SafePointNode::size_of() const { return sizeof(*this); }
1285 bool SafePointNode::cmp( const Node &n ) const {
1296 }
1297 }
1298
1299
1300 //----------------------------next_exception-----------------------------------
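// Exception states are chained to a SafePoint through an extra precedence edge
// beyond req().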
1301 SafePointNode* SafePointNode::next_exception() const {
1302 if (len() == req()) {
1303 return nullptr;
1304 } else {
1305 Node* n = in(req());
1306 assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1307 return (SafePointNode*) n;
1308 }
1309 }
1310
1311
1312 //------------------------------Ideal------------------------------------------
1313 // Skip over any collapsed Regions
1314 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1315 assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1316 return remove_dead_region(phase, can_reshape) ? this : nullptr;
1317 }
1318
1319 //------------------------------Identity---------------------------------------
1320 // Remove obviously duplicate safepoints
1321 Node* SafePointNode::Identity(PhaseGVN* phase) {
1322
1323 // If you have back to back safepoints, remove one
1324 if (in(TypeFunc::Control)->is_SafePoint()) {
1325 Node* out_c = unique_ctrl_out_or_null();
1326 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1327 // outer loop's safepoint could confuse removal of the outer loop.
1328 if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1329 return in(TypeFunc::Control);
1330 }
1331 }
1332
1333 // Transforming long counted loops requires a safepoint node. Do not
1334 // eliminate a safepoint until loop opts are over.
1335 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1336 Node *n0 = in(0)->in(0);
1454 }
1455
1456 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1457 assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1458 int nb = igvn->C->root()->find_prec_edge(this);
1459 if (nb != -1) {
1460 igvn->delete_precedence_of(igvn->C->root(), nb);
1461 }
1462 }
1463
1464 //============== SafePointScalarObjectNode ==============
1465
1466 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
1467 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1468 _first_index(first_index),
1469 _depth(depth),
1470 _n_fields(n_fields),
1471 _alloc(alloc)
1472 {
1473 #ifdef ASSERT
1474 if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
1475 alloc->dump();
1476 assert(false, "unexpected call node");
1477 }
1478 #endif
1479 init_class_id(Class_SafePointScalarObject);
1480 }
1481
1482 // Do not allow value-numbering for SafePointScalarObject node.
1483 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1484 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1485 return (&n == this); // Always fail except on self
1486 }
1487
1488 uint SafePointScalarObjectNode::ideal_reg() const {
1489 return 0; // No matching to machine instruction
1490 }
1491
1492 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1493 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1494 }
1559 new_node = false;
1560 return (SafePointScalarMergeNode*)cached;
1561 }
1562 new_node = true;
1563 SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1564 sosn_map->Insert((void*)this, (void*)res);
1565 return res;
1566 }
1567
1568 #ifndef PRODUCT
1569 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
1570 st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
1571 }
1572 #endif
1573
1574 //=============================================================================
1575 uint AllocateNode::size_of() const { return sizeof(*this); }
1576
1577 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1578 Node *ctrl, Node *mem, Node *abio,
1579 Node *size, Node *klass_node, Node *initial_test)
1580 : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
1581 {
1582 init_class_id(Class_Allocate);
1583 init_flags(Flag_is_macro);
1584 _is_scalar_replaceable = false;
1585 _is_non_escaping = false;
1586 _is_allocation_MemBar_redundant = false;
1587 Node *topnode = C->top();
1588
1589 init_req( TypeFunc::Control , ctrl );
1590 init_req( TypeFunc::I_O , abio );
1591 init_req( TypeFunc::Memory , mem );
1592 init_req( TypeFunc::ReturnAdr, topnode );
1593 init_req( TypeFunc::FramePtr , topnode );
1594 init_req( AllocSize , size);
1595 init_req( KlassNode , klass_node);
1596 init_req( InitialTest , initial_test);
1597 init_req( ALength , topnode);
1598 init_req( ValidLengthTest , topnode);
1599 C->add_macro_node(this);
1600 }
1601
1602 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1603 {
1604 assert(initializer != nullptr && initializer->is_object_initializer(),
1605 "unexpected initializer method");
1606 BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1607 if (analyzer == nullptr) {
1608 return;
1609 }
1610
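  // If the receiver (argument 0) does not escape during the constructor, no other
  // thread can observe the object before it is fully initialized, so the MemBar
  // emitted after initialization is redundant.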
1611 // Allocation node is first parameter in its initializer
1612 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1613 _is_allocation_MemBar_redundant = true;
1614 }
1615 }
1616 Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
1617 Node* mark_node = nullptr;
1618 // For now only enable fast locking for non-array types
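  // The prototype mark word encodes the unlocked state with no identity hash installed.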
1619 mark_node = phase->MakeConX(markWord::prototype().value());
1620 return mark_node;
1621 }
1622
1623 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1624 // CastII, if appropriate. If we are not allowed to create new nodes, and
1625 // a CastII is appropriate, return null.
1626 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
1627 Node *length = in(AllocateNode::ALength);
1628 assert(length != nullptr, "length is not null");
1629
1630 const TypeInt* length_type = phase->find_int_type(length);
1631 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1632
1633 if (ary_type != nullptr && length_type != nullptr) {
1634 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1635 if (narrow_length_type != length_type) {
1636 // Assert one of:
1637 // - the narrow_length is 0
1638 // - the narrow_length is not wider than length
1639 assert(narrow_length_type == TypeInt::ZERO ||
1640 (length_type->is_con() && narrow_length_type->is_con() &&
1994
1995 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
1996 st->print("%s", _kind_names[_kind]);
1997 }
1998 #endif
1999
2000 //=============================================================================
2001 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2002
2003 // perform any generic optimizations first (returns 'this' or null)
2004 Node *result = SafePointNode::Ideal(phase, can_reshape);
2005 if (result != nullptr) return result;
2006 // Don't bother trying to transform a dead node
2007 if (in(0) && in(0)->is_top()) return nullptr;
2008
2009 // Now see if we can optimize away this lock. We don't actually
2010 // remove the locking here; we simply set the _eliminate flag, which
2011 // prevents macro expansion from expanding the lock. Since we don't
2012 // modify the graph, the value returned from this function is the
2013 // one computed above.
2014 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
2015 //
2016 // If we are locking a non-escaped object, the lock/unlock is unnecessary
2017 //
2018 ConnectionGraph *cgr = phase->C->congraph();
2019 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2020 assert(!is_eliminated() || is_coarsened(), "sanity");
2021 // The lock could be marked eliminated by lock coarsening
2022 // code during the first IGVN before EA. Replace the coarsened flag
2023 // so that all associated locks/unlocks are eliminated.
2024 #ifdef ASSERT
2025 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2026 #endif
2027 this->set_non_esc_obj();
2028 return result;
2029 }
2030
2031 if (!phase->C->do_locks_coarsening()) {
2032 return result; // Compiling without locks coarsening
2033 }
2034 //
2195 }
2196
2197 //=============================================================================
2198 uint UnlockNode::size_of() const { return sizeof(*this); }
2199
2200 //=============================================================================
2201 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2202
2203 // perform any generic optimizations first (returns 'this' or null)
2204 Node *result = SafePointNode::Ideal(phase, can_reshape);
2205 if (result != nullptr) return result;
2206 // Don't bother trying to transform a dead node
2207 if (in(0) && in(0)->is_top()) return nullptr;
2208
2209 // Now see if we can optimize away this unlock. We don't actually
2210 // remove the unlocking here; we simply set the _eliminate flag, which
2211 // prevents macro expansion from expanding the unlock. Since we don't
2212 // modify the graph, the value returned from this function is the
2213 // one computed above.
2214 // Escape state is defined after Parse phase.
2215 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
2216 //
2217 // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2218 //
2219 ConnectionGraph *cgr = phase->C->congraph();
2220 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2221 assert(!is_eliminated() || is_coarsened(), "sanity");
2222 // The lock could be marked eliminated by lock coarsening
2223 // code during the first IGVN before EA. Replace the coarsened flag
2224 // so that all associated locks/unlocks are eliminated.
2225 #ifdef ASSERT
2226 this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2227 #endif
2228 this->set_non_esc_obj();
2229 }
2230 }
2231 return result;
2232 }
2233
2234 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2235 if (C == nullptr) {
2275 }
2276 // unrelated
2277 return false;
2278 }
2279
2280 if (dest_t->isa_aryptr()) {
2281 // arraycopy or array clone
2282 if (t_oop->isa_instptr()) {
2283 return false;
2284 }
2285 if (!t_oop->isa_aryptr()) {
2286 return true;
2287 }
2288
2289 const Type* elem = dest_t->is_aryptr()->elem();
2290 if (elem == Type::BOTTOM) {
2291 // An array, but we don't know what the elements are
2292 return true;
2293 }
2294
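    // Widen the destination type to an unknown element offset and compare alias
    // classes: the copy can only modify t_oop if both fall in the same alias category.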
2295 dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
2296 uint dest_alias = phase->C->get_alias_index(dest_t);
2297 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2298
2299 return dest_alias == t_oop_alias;
2300 }
2301
2302 return true;
2303 }
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "ci/ciFlatArrayKlass.hpp"
28 #include "ci/bcEscapeAnalyzer.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/c2/barrierSetC2.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "opto/callGenerator.hpp"
34 #include "opto/callnode.hpp"
35 #include "opto/castnode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/escape.hpp"
38 #include "opto/inlinetypenode.hpp"
39 #include "opto/locknode.hpp"
40 #include "opto/machnode.hpp"
41 #include "opto/matcher.hpp"
42 #include "opto/parse.hpp"
43 #include "opto/regalloc.hpp"
44 #include "opto/regmask.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "utilities/powerOfTwo.hpp"
50 #include "code/vmreg.hpp"
51
52 // Portions of code courtesy of Clifford Click
53
54 // Optimization - Graph Style
55
56 //=============================================================================
57 uint StartNode::size_of() const { return sizeof(*this); }
58 bool StartNode::cmp( const Node &n ) const
59 { return _domain == ((StartNode&)n)._domain; }
60 const Type *StartNode::bottom_type() const { return _domain; }
61 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
62 #ifndef PRODUCT
63 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
64 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
65 #endif
66
67 //------------------------------Ideal------------------------------------------
68 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
69 return remove_dead_region(phase, can_reshape) ? this : nullptr;
70 }
71
72 //------------------------------calling_convention-----------------------------
73 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
74 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
75 }
76
77 //------------------------------Registers--------------------------------------
78 const RegMask &StartNode::in_RegMask(uint) const {
79 return RegMask::Empty;
80 }
81
82 //------------------------------match------------------------------------------
83 // Construct projections for incoming parameters, and their RegMask info
84 Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
85 switch (proj->_con) {
86 case TypeFunc::Control:
87 case TypeFunc::I_O:
88 case TypeFunc::Memory:
89 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
90 case TypeFunc::FramePtr:
91 return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
92 case TypeFunc::ReturnAdr:
93 return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
94 case TypeFunc::Parms:
95 default: {
96 uint parm_num = proj->_con - TypeFunc::Parms;
97 const Type *t = _domain->field_at(proj->_con);
98 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
99 return new ConNode(Type::TOP);
100 uint ideal_reg = t->ideal_reg();
101 RegMask &rm = match->_calling_convention_mask[parm_num];
102 return new MachProjNode(this,proj->_con,rm,ideal_reg);
103 }
104 }
105 return nullptr;
106 }
107
108 //=============================================================================
109 const char * const ParmNode::names[TypeFunc::Parms+1] = {
110 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
111 };
112
113 #ifndef PRODUCT
114 void ParmNode::dump_spec(outputStream *st) const {
115 if( _con < TypeFunc::Parms ) {
116 st->print("%s", names[_con]);
117 } else {
118 st->print("Parm%d: ",_con-TypeFunc::Parms);
119 // Verbose and WizardMode dump bottom_type for all nodes
120 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
121 }
122 }
123
124 void ParmNode::dump_compact_spec(outputStream *st) const {
125 if (_con < TypeFunc::Parms) {
126 st->print("%s", names[_con]);
127 } else {
473 if (cik->is_instance_klass()) {
474 cik->print_name_on(st);
475 iklass = cik->as_instance_klass();
476 } else if (cik->is_type_array_klass()) {
477 cik->as_array_klass()->base_element_type()->print_name_on(st);
478 st->print("[%d]", spobj->n_fields());
479 } else if (cik->is_obj_array_klass()) {
480 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
481 if (cie->is_instance_klass()) {
482 cie->print_name_on(st);
483 } else if (cie->is_type_array_klass()) {
484 cie->as_array_klass()->base_element_type()->print_name_on(st);
485 } else {
486 ShouldNotReachHere();
487 }
488 st->print("[%d]", spobj->n_fields());
489 int ndim = cik->as_array_klass()->dimension() - 1;
490 while (ndim-- > 0) {
491 st->print("[]");
492 }
493 } else if (cik->is_flat_array_klass()) {
494 ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
495 cie->print_name_on(st);
496 st->print("[%d]", spobj->n_fields());
497 int ndim = cik->as_array_klass()->dimension() - 1;
498 while (ndim-- > 0) {
499 st->print("[]");
500 }
501 }
502 st->print("={");
503 uint nf = spobj->n_fields();
504 if (nf > 0) {
505 uint first_ind = spobj->first_index(mcall->jvms());
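        // A scalar-replaced inline type carries an extra leading input (is_init)
        // recording whether the value was initialized (non-null).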
506 if (iklass != nullptr && iklass->is_inlinetype()) {
507 Node* init_node = mcall->in(first_ind++);
508 if (!init_node->is_top()) {
509 st->print(" [is_init");
510 format_helper(regalloc, st, init_node, ":", -1, nullptr);
511 }
512 }
513 Node* fld_node = mcall->in(first_ind);
514 ciField* cifield;
515 if (iklass != nullptr) {
516 st->print(" [");
517 cifield = iklass->nonstatic_field_at(0);
518 cifield->print_name_on(st);
519 format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
520 } else {
521 format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
522 }
523 for (uint j = 1; j < nf; j++) {
524 fld_node = mcall->in(first_ind+j);
525 if (iklass != nullptr) {
526 st->print(", [");
527 cifield = iklass->nonstatic_field_at(j);
528 cifield->print_name_on(st);
529 format_helper(regalloc, st, fld_node, ":", j, &scobjs);
530 } else {
531 format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
532 }
706 if (i == TypeFunc::Parms) st->print("(");
707 Node* p = in(i);
708 if (p != nullptr) {
709 p->dump_idx(false, st, dc);
710 st->print(" ");
711 } else {
712 st->print("_ ");
713 }
714 }
715 st->print(")");
716 }
717
718 void CallNode::dump_spec(outputStream *st) const {
719 st->print(" ");
720 if (tf() != nullptr) tf()->dump_on(st);
721 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
722 if (jvms() != nullptr) jvms()->dump_spec(st);
723 }
724 #endif
725
726 const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
727 const Type* CallNode::Value(PhaseGVN* phase) const {
728 if (!in(0) || phase->type(in(0)) == Type::TOP) {
729 return Type::TOP;
730 }
731 return tf()->range_cc();
732 }
733
734 //------------------------------calling_convention-----------------------------
735 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
736 if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
737 // The call to that stub is a special case: its inputs are
738 // multiple values returned from a call and so it should follow
739 // the return convention.
740 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
741 return;
742 }
743 // Use the standard compiler calling convention
744 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
745 }
746
747
748 //------------------------------match------------------------------------------
749 // Construct projections for control, I/O, memory-fields, ..., and
750 // return result(s) along with their RegMask info
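// For calls that return an inline type as multiple fields, 'mask' supplies one
// register mask per returned value, indexed from TypeFunc::Parms.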
751 Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
752 uint con = proj->_con;
753 const TypeTuple* range_cc = tf()->range_cc();
754 if (con >= TypeFunc::Parms) {
755 if (tf()->returns_inline_type_as_fields()) {
756 // The call returns multiple values (inline type fields): we
757 // create one projection per returned value.
758 assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
759 uint ideal_reg = range_cc->field_at(con)->ideal_reg();
760 return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
761 } else {
762 if (con == TypeFunc::Parms) {
763 uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
764 OptoRegPair regs = Opcode() == Op_CallLeafVector
765 ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine
766 : match->c_return_value(ideal_reg);
767 RegMask rm = RegMask(regs.first());
768
769 if (Opcode() == Op_CallLeafVector) {
770 // If the return is in vector, compute appropriate regmask taking into account the whole range
771 if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
772 if(OptoReg::is_valid(regs.second())) {
773 for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
774 rm.Insert(r);
775 }
776 }
777 }
778 }
779
780 if (OptoReg::is_valid(regs.second())) {
781 rm.Insert(regs.second());
782 }
783 return new MachProjNode(this,con,rm,ideal_reg);
784 } else {
785 assert(con == TypeFunc::Parms+1, "only one return value");
786 assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
787 return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
788 }
789 }
790 }
791
792 switch (con) {
793 case TypeFunc::Control:
794 case TypeFunc::I_O:
795 case TypeFunc::Memory:
796 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
797
798 case TypeFunc::ReturnAdr:
799 case TypeFunc::FramePtr:
800 default:
801 ShouldNotReachHere();
802 }
803 return nullptr;
804 }
805
806 // Do we Match on this edge index or not? Match no edges
807 uint CallNode::match_edge(uint idx) const {
808 return 0;
809 }
810
811 //
812 // Determine whether the call could modify the field of the specified
813 // instance at the specified offset.
814 //
815 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
816 assert((t_oop != nullptr), "sanity");
817 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
818 const TypeTuple* args = _tf->domain_sig();
819 Node* dest = nullptr;
820 // Stubs that can be called once an ArrayCopyNode is expanded have
821 // different signatures. Look for the second pointer argument,
822 // which is the destination of the copy.
823 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
824 if (args->field_at(i)->isa_ptr()) {
825 j++;
826 if (j == 2) {
827 dest = in(i);
828 break;
829 }
830 }
831 }
832 guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
833 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
834 return true;
835 }
836 return false;
837 }
838 if (t_oop->is_known_instance()) {
847 Node* proj = proj_out_or_null(TypeFunc::Parms);
848 if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
849 return false;
850 }
851 }
852 if (is_CallJava() && as_CallJava()->method() != nullptr) {
853 ciMethod* meth = as_CallJava()->method();
854 if (meth->is_getter()) {
855 return false;
856 }
857 // May modify (by reflection) if a boxing object is passed
858 // as an argument or returned.
859 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
860 if (proj != nullptr) {
861 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
862 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
863 (inst_t->instance_klass() == boxing_klass))) {
864 return true;
865 }
866 }
867 const TypeTuple* d = tf()->domain_cc();
868 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
869 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
870 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
871 (inst_t->instance_klass() == boxing_klass))) {
872 return true;
873 }
874 }
875 return false;
876 }
877 }
878 return true;
879 }
880
881 // Does this call have a direct reference to n other than debug information?
882 bool CallNode::has_non_debug_use(Node* n) {
883 const TypeTuple* d = tf()->domain_cc();
884 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
885 if (in(i) == n) {
886 return true;
887 }
888 }
889 return false;
890 }
891
892 bool CallNode::has_debug_use(Node* n) {
893 if (jvms() != nullptr) {
894 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
895 if (in(i) == n) {
896 return true;
897 }
898 }
899 }
900 return false;
901 }
902
903 // Returns the unique CheckCastPP of a call,
904 // or 'this' if there are several CheckCastPPs or unexpected uses,
905 // or null if there is none.
906 Node *CallNode::result_cast() {
907 Node *cast = nullptr;
908
909 Node *p = proj_out_or_null(TypeFunc::Parms);
910 if (p == nullptr)
911 return nullptr;
912
913 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
914 Node *use = p->fast_out(i);
915 if (use->is_CheckCastPP()) {
916 if (cast != nullptr) {
917 return this; // more than 1 CheckCastPP
918 }
919 cast = use;
920 } else if (!use->is_Initialize() &&
921 !use->is_AddP() &&
922 use->Opcode() != Op_MemBarStoreStore) {
923 // Expected uses are restricted to a CheckCastPP, an Initialize
924 // node, a MemBarStoreStore (clone) and AddP nodes. If we
925 // encounter any other use (a Phi node can be seen in rare
926 // cases) return this to prevent incorrect optimizations.
927 return this;
928 }
929 }
930 return cast;
931 }
932
933
934 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
935 uint max_res = TypeFunc::Parms-1;
936 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
937 ProjNode *pn = fast_out(i)->as_Proj();
938 max_res = MAX2(max_res, pn->_con);
939 }
940
941 assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
942
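  // An inline type returned as fields yields several result projections (one per
  // field), so size the CallProjections object for max_res - TypeFunc::Parms + 1
  // result slots.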
943 uint projs_size = sizeof(CallProjections);
944 if (max_res > TypeFunc::Parms) {
945 projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
946 }
947 char* projs_storage = resource_allocate_bytes(projs_size);
948 CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
949
950 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
951 ProjNode *pn = fast_out(i)->as_Proj();
952 if (pn->outcnt() == 0) continue;
953 switch (pn->_con) {
954 case TypeFunc::Control:
955 {
956 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
957 projs->fallthrough_proj = pn;
958 const Node* cn = pn->unique_ctrl_out_or_null();
959 if (cn != nullptr && cn->is_Catch()) {
960 ProjNode *cpn = nullptr;
961 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
962 cpn = cn->fast_out(k)->as_Proj();
963 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
964 if (cpn->_con == CatchProjNode::fall_through_index)
965 projs->fallthrough_catchproj = cpn;
966 else {
967 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
968 projs->catchall_catchproj = cpn;
974 case TypeFunc::I_O:
975 if (pn->_is_io_use)
976 projs->catchall_ioproj = pn;
977 else
978 projs->fallthrough_ioproj = pn;
979 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
980 Node* e = pn->out(j);
981 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
982 assert(projs->exobj == nullptr, "only one");
983 projs->exobj = e;
984 }
985 }
986 break;
987 case TypeFunc::Memory:
988 if (pn->_is_io_use)
989 projs->catchall_memproj = pn;
990 else
991 projs->fallthrough_memproj = pn;
992 break;
993 case TypeFunc::Parms:
994 projs->resproj[0] = pn;
995 break;
996 default:
997 assert(pn->_con <= max_res, "unexpected projection from allocation node.");
998 projs->resproj[pn->_con-TypeFunc::Parms] = pn;
999 break;
1000 }
1001 }
1002
1003 // The resproj may not exist because the result could be ignored
1004 // and the exception object may not exist if an exception handler
1005 // swallows the exception, but all the others must exist and be found.
1006 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
1007 assert(!do_asserts || projs->fallthrough_proj != nullptr, "must be found");
1008 assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
1009 assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
1010 assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
1011 assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
1012 if (separate_io_proj) {
1013 assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
1014 assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
1015 }
1016 return projs;
1017 }
1018
1019 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1020 #ifdef ASSERT
1021 // Validate attached generator
1022 CallGenerator* cg = generator();
1023 if (cg != nullptr) {
1024 assert((is_CallStaticJava() && cg->is_mh_late_inline()) ||
1025 (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
1026 }
1027 #endif // ASSERT
1028 return SafePointNode::Ideal(phase, can_reshape);
1029 }
1030
1031 bool CallNode::is_call_to_arraycopystub() const {
1032 if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
1033 return true;
1034 }
1035 return false;
1036 }
1037
1038 //=============================================================================
1039 uint CallJavaNode::size_of() const { return sizeof(*this); }
1040 bool CallJavaNode::cmp( const Node &n ) const {
1041 CallJavaNode &call = (CallJavaNode&)n;
1042 return CallNode::cmp(call) && _method == call._method &&
1043 _override_symbolic_info == call._override_symbolic_info;
1044 }
1045
1046 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1047 // Copy debug information and adjust JVMState information
1048 uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
1049 uint new_dbg_start = tf()->domain_sig()->cnt();
1050 int jvms_adj = new_dbg_start - old_dbg_start;
1051 assert (new_dbg_start == req(), "argument count mismatch");
1052 Compile* C = phase->C;
1053
1054 // SafePointScalarObject node could be referenced several times in debug info.
1055 // Use Dict to record cloned nodes.
1056 Dict* sosn_map = new Dict(cmpkey,hashkey);
1057 for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1058 Node* old_in = sfpt->in(i);
1059 // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1060 if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
1061 SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1062 bool new_node;
1063 Node* new_in = old_sosn->clone(sosn_map, new_node);
1064 if (new_node) { // New node?
1065 new_in->set_req(0, C->root()); // reset control edge
1066 new_in = phase->transform(new_in); // Register new node.
1067 }
1068 old_in = new_in;
1069 }
1070 add_req(old_in);
1071 }
1072
1073 // JVMS may be shared so clone it before we modify it
1074 set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
1075 for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1076 jvms->set_map(this);
1077 jvms->set_locoff(jvms->locoff()+jvms_adj);
1078 jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1079 jvms->set_monoff(jvms->monoff()+jvms_adj);
1080 jvms->set_scloff(jvms->scloff()+jvms_adj);
1081 jvms->set_endoff(jvms->endoff()+jvms_adj);
1082 }
1083 }
1084
1085 #ifdef ASSERT
1086 bool CallJavaNode::validate_symbolic_info() const {
1087 if (method() == nullptr) {
1088 return true; // call into runtime or uncommon trap
1089 }
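  // With Valhalla, if_acmpeq/if_acmpne may be lowered to a call (substitutability
  // check), so there is no invoke bytecode at this bci to validate against.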
1090 Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
1091 if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
1092 return true;
1093 }
1094 ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1095 ciMethod* callee = method();
1096 if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1097 assert(override_symbolic_info(), "should be set");
1098 }
1099 assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1100 return true;
1101 }
1102 #endif
1103
1104 #ifndef PRODUCT
1105 void CallJavaNode::dump_spec(outputStream* st) const {
1106 if( _method ) _method->print_short_name(st);
1107 CallNode::dump_spec(st);
1108 }
1109
1110 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1111 if (_method) {
1112 _method->print_short_name(st);
1113 } else {
1114 st->print("<?>");
1115 }
1116 }
1117 #endif
1118
1119 //=============================================================================
1120 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1121 bool CallStaticJavaNode::cmp( const Node &n ) const {
1122 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1123 return CallJavaNode::cmp(call);
1124 }
1125
1126 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1127 if (can_reshape && uncommon_trap_request() != 0) {
1128 PhaseIterGVN* igvn = phase->is_IterGVN();
1129 if (remove_unknown_flat_array_load(igvn, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
1130 if (!in(0)->is_Region()) {
1131 igvn->replace_input_of(this, 0, phase->C->top());
1132 }
1133 return this;
1134 }
1135 }
1136
1137 CallGenerator* cg = generator();
1138 if (can_reshape && cg != nullptr) {
1139 assert(IncrementalInlineMH, "required");
1140 assert(cg->call_node() == this, "mismatch");
1141 assert(cg->is_mh_late_inline(), "not virtual");
1142
1143 // Check whether this method handle call becomes a candidate for inlining.
1144 ciMethod* callee = cg->method();
1145 vmIntrinsics::ID iid = callee->intrinsic_id();
1146 if (iid == vmIntrinsics::_invokeBasic) {
1147 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1148 phase->C->prepend_late_inline(cg);
1149 set_generator(nullptr);
1150 }
1151 } else if (iid == vmIntrinsics::_linkToNative) {
1152 // never retry
1153 } else {
1154 assert(callee->has_member_arg(), "wrong type of call?");
1155 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1156 phase->C->prepend_late_inline(cg);
1169
1170 //----------------------------uncommon_trap_request----------------------------
1171 // If this is an uncommon trap, return the request code, else zero.
1172 int CallStaticJavaNode::uncommon_trap_request() const {
1173 return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
1174 }
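// The trap request is encoded as a compile-time constant int passed in the first
// parameter slot (TypeFunc::Parms) of the uncommon trap call; decode it from there.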
1175 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1176 #ifndef PRODUCT
1177 if (!(call->req() > TypeFunc::Parms &&
1178 call->in(TypeFunc::Parms) != nullptr &&
1179 call->in(TypeFunc::Parms)->is_Con() &&
1180 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1181 assert(in_dump() != 0, "OK if dumping");
1182 tty->print("[bad uncommon trap]");
1183 return 0;
1184 }
1185 #endif
1186 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1187 }
1188
1189 // Split-if can cause the flat array branch of an array load with unknown type (see
1190 // Parse::array_load) to end in an uncommon trap. In that case, the call to
1191 // 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState.
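// Illustrative sketch (simplified) of the control shape this code looks for, starting
// at the control input of this uncommon trap and walking up:
//
//   MemBar
//     |
//   Proj (control)
//     |
//   CallStaticJava  (entry point == OptoRuntime::load_unknown_inline_Java())
//     |
//   Proj / Catch / MemBar nodes ...
//     |
//   this uncommon trap call, or a Region merging several such paths (each Region
//   input is handled recursively with a per-path memory state)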
1192 bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
1193 if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
1194 return false;
1195 }
1196 if (ctl->is_Region()) {
1197 bool res = false;
1198 for (uint i = 1; i < ctl->req(); i++) {
1199 MergeMemNode* mm = mem->clone()->as_MergeMem();
1200 for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
1201 Node* m = mms.memory();
1202 if (m->is_Phi() && m->in(0) == ctl) {
1203 mms.set_memory(m->in(i));
1204 }
1205 }
1206 if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
1207 res = true;
1208 if (!ctl->in(i)->is_Region()) {
1209 igvn->replace_input_of(ctl, i, igvn->C->top());
1210 }
1211 }
1212 igvn->remove_dead_node(mm);
1213 }
1214 return res;
1215 }
1216 // Verify the control flow is ok
1217 Node* call = ctl;
1218 MemBarNode* membar = nullptr;
1219 for (;;) {
1220 if (call == nullptr || call->is_top()) {
1221 return false;
1222 }
1223 if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
1224 call = call->in(0);
1225 } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
1226 call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1227 assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
1228 membar = call->in(0)->in(0)->as_MemBar();
1229 break;
1230 } else {
1231 return false;
1232 }
1233 }
1234
1235 JVMState* jvms = call->jvms();
1236 if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
1237 return false;
1238 }
1239
1240 Node* call_mem = call->in(TypeFunc::Memory);
1241 if (call_mem == nullptr || call_mem->is_top()) {
1242 return false;
1243 }
1244 if (!call_mem->is_MergeMem()) {
1245 call_mem = MergeMemNode::make(call_mem);
1246 igvn->register_new_node_with_optimizer(call_mem);
1247 }
1248
1249 // Verify that there's no unexpected side effect
1250 for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
1251 Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
1252 Node* m2 = mms2.memory2();
1253
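// Walk m1 up its memory chain (through projections, membars and merges) trying to
// reach m2 or the load_unknown_inline call itself; the fixed iteration bound is a
// safety net against unexpectedly long or cyclic chains.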
1254 for (uint i = 0; i < 100; i++) {
1255 if (m1 == m2) {
1256 break;
1257 } else if (m1->is_Proj()) {
1258 m1 = m1->in(0);
1259 } else if (m1->is_MemBar()) {
1260 m1 = m1->in(TypeFunc::Memory);
1261 } else if (m1->Opcode() == Op_CallStaticJava &&
1262 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1263 if (m1 != call) {
1264 return false;
1265 }
1266 break;
1267 } else if (m1->is_MergeMem()) {
1268 MergeMemNode* mm = m1->as_MergeMem();
1269 int idx = mms2.alias_idx();
1270 if (idx == Compile::AliasIdxBot) {
1271 m1 = mm->base_memory();
1272 } else {
1273 m1 = mm->memory_at(idx);
1274 }
1275 } else {
1276 return false;
1277 }
1278 }
1279 }
1280 if (call_mem->outcnt() == 0) {
1281 igvn->remove_dead_node(call_mem);
1282 }
1283
1284 // Remove membar preceding the call
1285 membar->remove(igvn);
1286
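// Build a new uncommon trap call that takes over the control, I/O, memory, frame
// pointer, return address and debug info of the load_unknown_inline call; the trap
// request constant is its only argument.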
1287 address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
1288 CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
1289 unc->init_req(TypeFunc::Control, call->in(0));
1290 unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
1291 unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
1292 unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
1293 unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
1294 unc->init_req(TypeFunc::Parms+0, unc_arg);
1295 unc->set_cnt(PROB_UNLIKELY_MAG(4));
1296 unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
1297
1298 // Replace the call with an uncommon trap
1299 igvn->replace_input_of(call, 0, igvn->C->top());
1300
1301 igvn->register_new_node_with_optimizer(unc);
1302
1303 Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
1304 Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
1305 igvn->add_input_to(igvn->C->root(), halt);
1306
1307 return true;
1308 }
1309
1310
1311 #ifndef PRODUCT
1312 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1313 st->print("# Static ");
1314 if (_name != nullptr) {
1315 st->print("%s", _name);
1316 int trap_req = uncommon_trap_request();
1317 if (trap_req != 0) {
1318 char buf[100];
1319 st->print("(%s)",
1320 Deoptimization::format_trap_request(buf, sizeof(buf),
1321 trap_req));
1322 }
1323 st->print(" ");
1324 }
1325 CallJavaNode::dump_spec(st);
1326 }
1327
1328 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1329 if (_method) {
1330 _method->print_short_name(st);
1395 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1396 bool CallRuntimeNode::cmp( const Node &n ) const {
1397 CallRuntimeNode &call = (CallRuntimeNode&)n;
1398 return CallNode::cmp(call) && !strcmp(_name,call._name);
1399 }
1400 #ifndef PRODUCT
1401 void CallRuntimeNode::dump_spec(outputStream *st) const {
1402 st->print("# ");
1403 st->print("%s", _name);
1404 CallNode::dump_spec(st);
1405 }
1406 #endif
1407 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1408 bool CallLeafVectorNode::cmp( const Node &n ) const {
1409 CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1410 return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1411 }
1412
1413 //------------------------------calling_convention-----------------------------
1414 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1415 if (_entry_point == nullptr) {
1416 // A call with a null entry point is a special case: its inputs are
1417 // multiple values returned from another call and so it should follow
1418 // the Java return convention rather than the C calling convention.
1419 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1420 return;
1421 }
1422 SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1423 }
1424
1425 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1426 #ifdef ASSERT
1427 assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1428 "return vector size must match");
1429 const TypeTuple* d = tf()->domain_sig();
1430 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1431 Node* arg = in(i);
1432 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1433 "vector argument size must match");
1434 }
1435 #endif
1436
1437 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1438 }
1439
1440 //=============================================================================
1441 //------------------------------calling_convention-----------------------------
1442
1443
1444 //=============================================================================
1445 #ifndef PRODUCT
1446 void CallLeafNode::dump_spec(outputStream *st) const {
1447 st->print("# ");
1448 st->print("%s", _name);
1449 CallNode::dump_spec(st);
1450 }
1451 #endif
1452
1453 uint CallLeafNoFPNode::match_edge(uint idx) const {
1454 // Null entry point is a special case for which the target is in a
1455 // register. Need to match that edge.
1456 return entry_point() == nullptr && idx == TypeFunc::Parms;
1457 }
1458
1459 //=============================================================================
1460
1461 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1462 assert(verify_jvms(jvms), "jvms must match");
1463 int loc = jvms->locoff() + idx;
1464 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1465 // If the current local at idx is top then the local at idx - 1 could
1466 // be a long/double that needs to be killed since top could
1467 // represent the 2nd half of the long/double.
1468 uint ideal = in(loc -1)->ideal_reg();
1469 if (ideal == Op_RegD || ideal == Op_RegL) {
1470 // set other (low index) half to top
1471 set_req(loc - 1, in(loc));
1472 }
1473 }
1474 set_req(loc, c);
1475 }
1476
1477 uint SafePointNode::size_of() const { return sizeof(*this); }
1478 bool SafePointNode::cmp( const Node &n ) const {
1489 }
1490 }
1491
1492
1493 //----------------------------next_exception-----------------------------------
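// Exception states are chained through an extra precedence edge: if this SafePoint
// has a precedence edge, it points to the next exception SafePoint in the chain.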
1494 SafePointNode* SafePointNode::next_exception() const {
1495 if (len() == req()) {
1496 return nullptr;
1497 } else {
1498 Node* n = in(req());
1499 assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1500 return (SafePointNode*) n;
1501 }
1502 }
1503
1504
1505 //------------------------------Ideal------------------------------------------
1506 // Skip over any collapsed Regions
1507 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1508 assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1509 if (remove_dead_region(phase, can_reshape)) {
1510 return this;
1511 }
1512 // Scalarize inline types in safepoint debug info.
1513 // Delay this until all inlining is over to avoid getting inconsistent debug info.
1514 if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
1515 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
1516 Node* n = in(i)->uncast();
1517 if (n->is_InlineType()) {
1518 n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
1519 }
1520 }
1521 }
1522 return nullptr;
1523 }
1524
1525 //------------------------------Identity---------------------------------------
1526 // Remove obviously duplicate safepoints
1527 Node* SafePointNode::Identity(PhaseGVN* phase) {
1528
1529 // If there are back-to-back safepoints, remove one
1530 if (in(TypeFunc::Control)->is_SafePoint()) {
1531 Node* out_c = unique_ctrl_out_or_null();
1532 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1533 // outer loop's safepoint could confuse removal of the outer loop.
1534 if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1535 return in(TypeFunc::Control);
1536 }
1537 }
1538
1539 // Transforming long counted loops requires a safepoint node. Do not
1540 // eliminate a safepoint until loop opts are over.
1541 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1542 Node *n0 = in(0)->in(0);
1660 }
1661
1662 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1663 assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1664 int nb = igvn->C->root()->find_prec_edge(this);
1665 if (nb != -1) {
1666 igvn->delete_precedence_of(igvn->C->root(), nb);
1667 }
1668 }
1669
1670 //============== SafePointScalarObjectNode ==============
1671
1672 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
1673 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1674 _first_index(first_index),
1675 _depth(depth),
1676 _n_fields(n_fields),
1677 _alloc(alloc)
1678 {
1679 #ifdef ASSERT
1680 if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
1681 alloc->dump();
1682 assert(false, "unexpected call node");
1683 }
1684 #endif
1685 init_class_id(Class_SafePointScalarObject);
1686 }
1687
1688 // Do not allow value-numbering for SafePointScalarObject node.
1689 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1690 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1691 return (&n == this); // Always fail except on self
1692 }
1693
1694 uint SafePointScalarObjectNode::ideal_reg() const {
1695 return 0; // No matching to machine instruction
1696 }
1697
1698 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1699 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1700 }
1765 new_node = false;
1766 return (SafePointScalarMergeNode*)cached;
1767 }
1768 new_node = true;
1769 SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1770 sosn_map->Insert((void*)this, (void*)res);
1771 return res;
1772 }
1773
1774 #ifndef PRODUCT
1775 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
1776 st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
1777 }
1778 #endif
1779
1780 //=============================================================================
1781 uint AllocateNode::size_of() const { return sizeof(*this); }
1782
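// AllocateNode is a macro node: it only describes the allocation here and is expanded
// into the actual allocation code (fast and slow paths) during macro expansion.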
1783 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1784 Node *ctrl, Node *mem, Node *abio,
1785 Node *size, Node *klass_node,
1786 Node* initial_test,
1787 InlineTypeNode* inline_type_node)
1788 : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
1789 {
1790 init_class_id(Class_Allocate);
1791 init_flags(Flag_is_macro);
1792 _is_scalar_replaceable = false;
1793 _is_non_escaping = false;
1794 _is_allocation_MemBar_redundant = false;
1795 _larval = false;
1796 Node *topnode = C->top();
1797
1798 init_req( TypeFunc::Control , ctrl );
1799 init_req( TypeFunc::I_O , abio );
1800 init_req( TypeFunc::Memory , mem );
1801 init_req( TypeFunc::ReturnAdr, topnode );
1802 init_req( TypeFunc::FramePtr , topnode );
1803 init_req( AllocSize , size);
1804 init_req( KlassNode , klass_node);
1805 init_req( InitialTest , initial_test);
1806 init_req( ALength , topnode);
1807 init_req( ValidLengthTest , topnode);
1808 init_req( InlineType , inline_type_node);
1809 // DefaultValue defaults to nullptr
1810 // RawDefaultValue defaults to nullptr
1811 C->add_macro_node(this);
1812 }
1813
1814 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1815 {
1816 assert(initializer != nullptr &&
1817 (initializer->is_object_constructor() || initializer->is_class_initializer()),
1818 "unexpected initializer method");
1819 BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1820 if (analyzer == nullptr) {
1821 return;
1822 }
1823
1824 // The allocation node is the first parameter of its initializer
1825 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1826 _is_allocation_MemBar_redundant = true;
1827 }
1828 }
1829
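// Compute the initial mark word for the newly allocated object. With Valhalla enabled
// the prototype mark word is loaded from Klass::prototype_header (it may differ per
// klass); otherwise the global constant prototype is used. The larval bit is OR'ed in
// for larval allocations.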
1830 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
1831 Node* mark_node = nullptr;
1832 if (EnableValhalla) {
1833 Node* klass_node = in(AllocateNode::KlassNode);
1834 Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
1835 mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1836 } else {
1837 mark_node = phase->MakeConX(markWord::prototype().value());
1838 }
1839 mark_node = phase->transform(mark_node);
1840 // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
1841 return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
1842 }
1843
1844 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1845 // CastII, if appropriate. If we are not allowed to create new nodes, and
1846 // a CastII is appropriate, return null.
1847 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
1848 Node *length = in(AllocateNode::ALength);
1849 assert(length != nullptr, "length is not null");
1850
1851 const TypeInt* length_type = phase->find_int_type(length);
1852 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1853
1854 if (ary_type != nullptr && length_type != nullptr) {
1855 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1856 if (narrow_length_type != length_type) {
1857 // Assert one of:
1858 // - the narrow_length is 0
1859 // - the narrow_length is not wider than length
1860 assert(narrow_length_type == TypeInt::ZERO ||
1861 (length_type->is_con() && narrow_length_type->is_con() &&
2215
2216 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
2217 st->print("%s", _kind_names[_kind]);
2218 }
2219 #endif
2220
2221 //=============================================================================
2222 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2223
2224 // perform any generic optimizations first (returns 'this' or null)
2225 Node *result = SafePointNode::Ideal(phase, can_reshape);
2226 if (result != nullptr) return result;
2227 // Don't bother trying to transform a dead node
2228 if (in(0) && in(0)->is_top()) return nullptr;
2229
2230 // Now see if we can optimize away this lock. We don't actually
2231 // remove the locking here; we simply set the _eliminate flag, which
2232 // prevents macro expansion from expanding the lock. Since we don't
2233 // modify the graph, the value returned from this function is the
2234 // one computed above.
2235 const Type* obj_type = phase->type(obj_node());
2236 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2237 //
2238 // If we are locking a non-escaped object, the lock/unlock is unnecessary
2239 //
2240 ConnectionGraph *cgr = phase->C->congraph();
2241 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2242 assert(!is_eliminated() || is_coarsened(), "sanity");
2243 // The lock could be marked eliminated by lock coarsening
2244 // code during first IGVN before EA. Replace the coarsened flag
2245 // to eliminate all associated locks/unlocks.
2246 #ifdef ASSERT
2247 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2248 #endif
2249 this->set_non_esc_obj();
2250 return result;
2251 }
2252
2253 if (!phase->C->do_locks_coarsening()) {
2254 return result; // Compiling without locks coarsening
2255 }
2256 //
2417 }
2418
2419 //=============================================================================
2420 uint UnlockNode::size_of() const { return sizeof(*this); }
2421
2422 //=============================================================================
2423 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2424
2425 // perform any generic optimizations first (returns 'this' or null)
2426 Node *result = SafePointNode::Ideal(phase, can_reshape);
2427 if (result != nullptr) return result;
2428 // Don't bother trying to transform a dead node
2429 if (in(0) && in(0)->is_top()) return nullptr;
2430
2431 // Now see if we can optimize away this unlock. We don't actually
2432 // remove the unlocking here; we simply set the _eliminate flag, which
2433 // prevents macro expansion from expanding the unlock. Since we don't
2434 // modify the graph, the value returned from this function is the
2435 // one computed above.
2436 // Escape state is defined after Parse phase.
2437 const Type* obj_type = phase->type(obj_node());
2438 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2439 //
2440 // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2441 //
2442 ConnectionGraph *cgr = phase->C->congraph();
2443 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2444 assert(!is_eliminated() || is_coarsened(), "sanity");
2445 // The lock could be marked eliminated by lock coarsening
2446 // code during first IGVN before EA. Replace the coarsened flag
2447 // to eliminate all associated locks/unlocks.
2448 #ifdef ASSERT
2449 this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2450 #endif
2451 this->set_non_esc_obj();
2452 }
2453 }
2454 return result;
2455 }
2456
2457 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2458 if (C == nullptr) {
2498 }
2499 // unrelated
2500 return false;
2501 }
2502
2503 if (dest_t->isa_aryptr()) {
2504 // arraycopy or array clone
2505 if (t_oop->isa_instptr()) {
2506 return false;
2507 }
2508 if (!t_oop->isa_aryptr()) {
2509 return true;
2510 }
2511
2512 const Type* elem = dest_t->is_aryptr()->elem();
2513 if (elem == Type::BOTTOM) {
2514 // An array, but we don't know what the elements are
2515 return true;
2516 }
2517
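// Widen both array types to arbitrary offsets and compare their alias indexes: the
// arraycopy can only modify memory in the same alias class as its destination array.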
2518 dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2519 t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2520 uint dest_alias = phase->C->get_alias_index(dest_t);
2521 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2522
2523 return dest_alias == t_oop_alias;
2524 }
2525
2526 return true;
2527 }