7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "ci/bcEscapeAnalyzer.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/c2/barrierSetC2.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "opto/callGenerator.hpp"
33 #include "opto/callnode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/escape.hpp"
37 #include "opto/locknode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/parse.hpp"
41 #include "opto/regalloc.hpp"
42 #include "opto/regmask.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/powerOfTwo.hpp"
47 #include "code/vmreg.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
54 uint StartNode::size_of() const { return sizeof(*this); }
55 bool StartNode::cmp( const Node &n ) const
56 { return _domain == ((StartNode&)n)._domain; }
57 const Type *StartNode::bottom_type() const { return _domain; }
58 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
59 #ifndef PRODUCT
60 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
61 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
62 #endif
63
64 //------------------------------Ideal------------------------------------------
65 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
66 return remove_dead_region(phase, can_reshape) ? this : NULL;
67 }
68
69 //------------------------------calling_convention-----------------------------
70 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
71 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
72 }
73
74 //------------------------------Registers--------------------------------------
75 const RegMask &StartNode::in_RegMask(uint) const {
76 return RegMask::Empty;
77 }
78
79 //------------------------------match------------------------------------------
80 // Construct projections for incoming parameters, and their RegMask info
81 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
82 switch (proj->_con) {
83 case TypeFunc::Control:
84 case TypeFunc::I_O:
85 case TypeFunc::Memory:
86 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
87 case TypeFunc::FramePtr:
88 return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
89 case TypeFunc::ReturnAdr:
90 return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
91 case TypeFunc::Parms:
92 default: {
93 uint parm_num = proj->_con - TypeFunc::Parms;
94 const Type *t = _domain->field_at(proj->_con);
95 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
96 return new ConNode(Type::TOP);
97 uint ideal_reg = t->ideal_reg();
98 RegMask &rm = match->_calling_convention_mask[parm_num];
99 return new MachProjNode(this,proj->_con,rm,ideal_reg);
100 }
101 }
102 return NULL;
103 }
104
105 //------------------------------StartOSRNode----------------------------------
106 // The method start node for an on stack replacement adapter
107
108 //------------------------------osr_domain-----------------------------
109 const TypeTuple *StartOSRNode::osr_domain() {
110 const Type **fields = TypeTuple::fields(2);
111 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
112
113 return TypeTuple::make(TypeFunc::Parms+1, fields);
114 }
115
116 //=============================================================================
117 const char * const ParmNode::names[TypeFunc::Parms+1] = {
118 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
119 };
120
121 #ifndef PRODUCT
122 void ParmNode::dump_spec(outputStream *st) const {
123 if( _con < TypeFunc::Parms ) {
124 st->print("%s", names[_con]);
125 } else {
126 st->print("Parm%d: ",_con-TypeFunc::Parms);
127 // Verbose and WizardMode dump bottom_type for all nodes
128 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
129 }
130 }
131
132 void ParmNode::dump_compact_spec(outputStream *st) const {
133 if (_con < TypeFunc::Parms) {
134 st->print("%s", names[_con]);
135 } else {
481 if (cik->is_instance_klass()) {
482 cik->print_name_on(st);
483 iklass = cik->as_instance_klass();
484 } else if (cik->is_type_array_klass()) {
485 cik->as_array_klass()->base_element_type()->print_name_on(st);
486 st->print("[%d]", spobj->n_fields());
487 } else if (cik->is_obj_array_klass()) {
488 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
489 if (cie->is_instance_klass()) {
490 cie->print_name_on(st);
491 } else if (cie->is_type_array_klass()) {
492 cie->as_array_klass()->base_element_type()->print_name_on(st);
493 } else {
494 ShouldNotReachHere();
495 }
496 st->print("[%d]", spobj->n_fields());
497 int ndim = cik->as_array_klass()->dimension() - 1;
498 while (ndim-- > 0) {
499 st->print("[]");
500 }
501 }
502 st->print("={");
503 uint nf = spobj->n_fields();
504 if (nf > 0) {
505 uint first_ind = spobj->first_index(mcall->jvms());
506 Node* fld_node = mcall->in(first_ind);
507 ciField* cifield;
508 if (iklass != NULL) {
509 st->print(" [");
510 cifield = iklass->nonstatic_field_at(0);
511 cifield->print_name_on(st);
512 format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
513 } else {
514 format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
515 }
516 for (uint j = 1; j < nf; j++) {
517 fld_node = mcall->in(first_ind+j);
518 if (iklass != NULL) {
519 st->print(", [");
520 cifield = iklass->nonstatic_field_at(j);
521 cifield->print_name_on(st);
522 format_helper(regalloc, st, fld_node, ":", j, &scobjs);
523 } else {
524 format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
525 }
699 if (i == TypeFunc::Parms) st->print("(");
700 Node* p = in(i);
701 if (p != nullptr) {
702 p->dump_idx(false, st, dc);
703 st->print(" ");
704 } else {
705 st->print("_ ");
706 }
707 }
708 st->print(")");
709 }
710
711 void CallNode::dump_spec(outputStream *st) const {
712 st->print(" ");
713 if (tf() != NULL) tf()->dump_on(st);
714 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
715 if (jvms() != NULL) jvms()->dump_spec(st);
716 }
717 #endif
718
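// A call's type is its TypeFunc range (the tuple of values it produces);
// Value() folds the call to TOP once its control input is known to be dead.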
719 const Type *CallNode::bottom_type() const { return tf()->range(); }
720 const Type* CallNode::Value(PhaseGVN* phase) const {
721 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
722 return tf()->range();
723 }
724
725 //------------------------------calling_convention-----------------------------
726 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
727 // Use the standard compiler calling convention
728 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
729 }
730
731
732 //------------------------------match------------------------------------------
733 // Construct projections for control, I/O, memory-fields, ..., and
734 // return result(s) along with their RegMask info
735 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
736 switch (proj->_con) {
737 case TypeFunc::Control:
738 case TypeFunc::I_O:
739 case TypeFunc::Memory:
740 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
741
742 case TypeFunc::Parms+1: // For LONG & DOUBLE returns
743 assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
744 // 2nd half of doubles and longs
745 return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
746
747 case TypeFunc::Parms: { // Normal returns
748 uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
749 OptoRegPair regs = Opcode() == Op_CallLeafVector
750 ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine
751 : is_CallRuntime()
752 ? match->c_return_value(ideal_reg) // Calls into C runtime
753 : match-> return_value(ideal_reg); // Calls into compiled Java code
754 RegMask rm = RegMask(regs.first());
755
756 if (Opcode() == Op_CallLeafVector) {
757 // If the return is in vector, compute appropriate regmask taking into account the whole range
758 if (ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
759 if (OptoReg::is_valid(regs.second())) {
760 for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
761 rm.Insert(r);
762 }
763 }
764 }
765 }
766
767 if( OptoReg::is_valid(regs.second()) )
768 rm.Insert( regs.second() );
769 return new MachProjNode(this,proj->_con,rm,ideal_reg);
770 }
771
772 case TypeFunc::ReturnAdr:
773 case TypeFunc::FramePtr:
774 default:
775 ShouldNotReachHere();
776 }
777 return NULL;
778 }
779
780 // Do we Match on this edge index or not? Match no edges
781 uint CallNode::match_edge(uint idx) const {
782 return 0;
783 }
784
785 //
786 // Determine whether the call could modify the field of the specified
787 // instance at the specified offset.
788 //
789 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
790 assert((t_oop != NULL), "sanity");
791 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
792 const TypeTuple* args = _tf->domain();
793 Node* dest = NULL;
794 // Stubs that can be called once an ArrayCopyNode is expanded have
795 // different signatures. Look for the second pointer argument,
796 // that is the destination of the copy.
797 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
798 if (args->field_at(i)->isa_ptr()) {
799 j++;
800 if (j == 2) {
801 dest = in(i);
802 break;
803 }
804 }
805 }
806 guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
807 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
808 return true;
809 }
810 return false;
811 }
812 if (t_oop->is_known_instance()) {
821 Node* proj = proj_out_or_null(TypeFunc::Parms);
822 if ((proj == NULL) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
823 return false;
824 }
825 }
826 if (is_CallJava() && as_CallJava()->method() != NULL) {
827 ciMethod* meth = as_CallJava()->method();
828 if (meth->is_getter()) {
829 return false;
830 }
831 // May modify (by reflection) if a boxing object is passed
832 // as an argument or returned.
833 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
834 if (proj != NULL) {
835 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
836 if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
837 (inst_t->instance_klass() == boxing_klass))) {
838 return true;
839 }
840 }
841 const TypeTuple* d = tf()->domain();
842 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
843 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
844 if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
845 (inst_t->instance_klass() == boxing_klass))) {
846 return true;
847 }
848 }
849 return false;
850 }
851 }
852 return true;
853 }
854
855 // Does this call have a direct reference to n other than debug information?
856 bool CallNode::has_non_debug_use(Node *n) {
857 const TypeTuple * d = tf()->domain();
858 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
859 Node *arg = in(i);
860 if (arg == n) {
861 return true;
862 }
863 }
864 return false;
865 }
866
867 // Returns the unique CheckCastPP of a call,
868 // or 'this' if there are several CheckCastPPs or unexpected uses,
869 // or NULL if there is none.
870 Node *CallNode::result_cast() {
871 Node *cast = NULL;
872
873 Node *p = proj_out_or_null(TypeFunc::Parms);
874 if (p == NULL)
875 return NULL;
876
877 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
878 Node *use = p->fast_out(i);
879 if (use->is_CheckCastPP()) {
880 if (cast != NULL) {
881 return this; // more than 1 CheckCastPP
882 }
883 cast = use;
884 } else if (!use->is_Initialize() &&
885 !use->is_AddP() &&
886 use->Opcode() != Op_MemBarStoreStore) {
887 // Expected uses are restricted to a CheckCastPP, an Initialize
888 // node, a MemBarStoreStore (clone) and AddP nodes. If we
889 // encounter any other use (a Phi node can be seen in rare
890 // cases) return this to prevent incorrect optimizations.
891 return this;
892 }
893 }
894 return cast;
895 }
896
897
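// Locate the projections hanging off this call (control, I/O, memory, result
// and exception paths) and record them in 'projs'; projections that do not
// exist are left NULL.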
898 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
899 projs->fallthrough_proj = NULL;
900 projs->fallthrough_catchproj = NULL;
901 projs->fallthrough_ioproj = NULL;
902 projs->catchall_ioproj = NULL;
903 projs->catchall_catchproj = NULL;
904 projs->fallthrough_memproj = NULL;
905 projs->catchall_memproj = NULL;
906 projs->resproj = NULL;
907 projs->exobj = NULL;
908
909 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
910 ProjNode *pn = fast_out(i)->as_Proj();
911 if (pn->outcnt() == 0) continue;
912 switch (pn->_con) {
913 case TypeFunc::Control:
914 {
915 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
916 projs->fallthrough_proj = pn;
917 const Node* cn = pn->unique_ctrl_out_or_null();
918 if (cn != NULL && cn->is_Catch()) {
919 ProjNode *cpn = NULL;
920 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
921 cpn = cn->fast_out(k)->as_Proj();
922 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
923 if (cpn->_con == CatchProjNode::fall_through_index)
924 projs->fallthrough_catchproj = cpn;
925 else {
926 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
927 projs->catchall_catchproj = cpn;
933 case TypeFunc::I_O:
934 if (pn->_is_io_use)
935 projs->catchall_ioproj = pn;
936 else
937 projs->fallthrough_ioproj = pn;
938 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
939 Node* e = pn->out(j);
940 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
941 assert(projs->exobj == NULL, "only one");
942 projs->exobj = e;
943 }
944 }
945 break;
946 case TypeFunc::Memory:
947 if (pn->_is_io_use)
948 projs->catchall_memproj = pn;
949 else
950 projs->fallthrough_memproj = pn;
951 break;
952 case TypeFunc::Parms:
953 projs->resproj = pn;
954 break;
955 default:
956 assert(false, "unexpected projection from allocation node.");
957 }
958 }
959
960 // The resproj may not exist because the result could be ignored
961 // and the exception object may not exist if an exception handler
962 // swallows the exception, but all the others must exist and be found.
963 assert(projs->fallthrough_proj != NULL, "must be found");
964 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
965 assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
966 assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found");
967 assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found");
968 assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found");
969 if (separate_io_proj) {
970 assert(!do_asserts || projs->catchall_memproj != NULL, "must be found");
971 assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found");
972 }
973 }
974
975 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
976 #ifdef ASSERT
977 // Validate attached generator
978 CallGenerator* cg = generator();
979 if (cg != NULL) {
980 assert(is_CallStaticJava() && cg->is_mh_late_inline() ||
981 is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
982 }
983 #endif // ASSERT
984 return SafePointNode::Ideal(phase, can_reshape);
985 }
986
987 bool CallNode::is_call_to_arraycopystub() const {
988 if (_name != NULL && strstr(_name, "arraycopy") != 0) {
989 return true;
990 }
991 return false;
992 }
993
994 //=============================================================================
995 uint CallJavaNode::size_of() const { return sizeof(*this); }
996 bool CallJavaNode::cmp( const Node &n ) const {
997 CallJavaNode &call = (CallJavaNode&)n;
998 return CallNode::cmp(call) && _method == call._method &&
999 _override_symbolic_info == call._override_symbolic_info;
1000 }
1001
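// Copy debug information (locals, expression stack, monitors) from another
// safepoint onto this call, cloning any SafePointScalarObjectNodes and shifting
// the JVMState offsets by the difference in argument counts.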
1002 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1003 // Copy debug information and adjust JVMState information
1004 uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
1005 uint new_dbg_start = tf()->domain()->cnt();
1006 int jvms_adj = new_dbg_start - old_dbg_start;
1007 assert (new_dbg_start == req(), "argument count mismatch");
1008 Compile* C = phase->C;
1009
1010 // SafePointScalarObject node could be referenced several times in debug info.
1011 // Use Dict to record cloned nodes.
1012 Dict* sosn_map = new Dict(cmpkey,hashkey);
1013 for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1014 Node* old_in = sfpt->in(i);
1015 // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1016 if (old_in != NULL && old_in->is_SafePointScalarObject()) {
1017 SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1018 bool new_node;
1019 Node* new_in = old_sosn->clone(sosn_map, new_node);
1020 if (new_node) { // New node?
1021 new_in->set_req(0, C->root()); // reset control edge
1022 new_in = phase->transform(new_in); // Register new node.
1023 }
1024 old_in = new_in;
1025 }
1026 add_req(old_in);
1027 }
1028
1029 // JVMS may be shared so clone it before we modify it
1030 set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL);
1031 for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1032 jvms->set_map(this);
1033 jvms->set_locoff(jvms->locoff()+jvms_adj);
1034 jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1035 jvms->set_monoff(jvms->monoff()+jvms_adj);
1036 jvms->set_scloff(jvms->scloff()+jvms_adj);
1037 jvms->set_endoff(jvms->endoff()+jvms_adj);
1038 }
1039 }
1040
1041 #ifdef ASSERT
1042 bool CallJavaNode::validate_symbolic_info() const {
1043 if (method() == NULL) {
1044 return true; // call into runtime or uncommon trap
1045 }
1046 ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1047 ciMethod* callee = method();
1048 if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1049 assert(override_symbolic_info(), "should be set");
1050 }
1051 assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1052 return true;
1053 }
1054 #endif
1055
1056 #ifndef PRODUCT
1057 void CallJavaNode::dump_spec(outputStream* st) const {
1058 if( _method ) _method->print_short_name(st);
1059 CallNode::dump_spec(st);
1060 }
1061
1062 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1063 if (_method) {
1064 _method->print_short_name(st);
1065 } else {
1066 st->print("<?>");
1067 }
1068 }
1069 #endif
1070
1071 //=============================================================================
1072 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1073 bool CallStaticJavaNode::cmp( const Node &n ) const {
1074 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1075 return CallJavaNode::cmp(call);
1076 }
1077
1078 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1079 CallGenerator* cg = generator();
1080 if (can_reshape && cg != NULL) {
1081 assert(IncrementalInlineMH, "required");
1082 assert(cg->call_node() == this, "mismatch");
1083 assert(cg->is_mh_late_inline(), "not virtual");
1084
1085 // Check whether this method handle call becomes a candidate for inlining.
1086 ciMethod* callee = cg->method();
1087 vmIntrinsics::ID iid = callee->intrinsic_id();
1088 if (iid == vmIntrinsics::_invokeBasic) {
1089 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1090 phase->C->prepend_late_inline(cg);
1091 set_generator(NULL);
1092 }
1093 } else if (iid == vmIntrinsics::_linkToNative) {
1094 // never retry
1095 } else {
1096 assert(callee->has_member_arg(), "wrong type of call?");
1097 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1098 phase->C->prepend_late_inline(cg);
1108 int CallStaticJavaNode::uncommon_trap_request() const {
1109 if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
1110 return extract_uncommon_trap_request(this);
1111 }
1112 return 0;
1113 }
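// The trap request is encoded as a constant int passed as the first argument
// (TypeFunc::Parms) of the uncommon_trap call.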
1114 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1115 #ifndef PRODUCT
1116 if (!(call->req() > TypeFunc::Parms &&
1117 call->in(TypeFunc::Parms) != NULL &&
1118 call->in(TypeFunc::Parms)->is_Con() &&
1119 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1120 assert(in_dump() != 0, "OK if dumping");
1121 tty->print("[bad uncommon trap]");
1122 return 0;
1123 }
1124 #endif
1125 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1126 }
1127
1128 #ifndef PRODUCT
1129 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1130 st->print("# Static ");
1131 if (_name != NULL) {
1132 st->print("%s", _name);
1133 int trap_req = uncommon_trap_request();
1134 if (trap_req != 0) {
1135 char buf[100];
1136 st->print("(%s)",
1137 Deoptimization::format_trap_request(buf, sizeof(buf),
1138 trap_req));
1139 }
1140 st->print(" ");
1141 }
1142 CallJavaNode::dump_spec(st);
1143 }
1144
1145 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1146 if (_method) {
1147 _method->print_short_name(st);
1212 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1213 bool CallRuntimeNode::cmp( const Node &n ) const {
1214 CallRuntimeNode &call = (CallRuntimeNode&)n;
1215 return CallNode::cmp(call) && !strcmp(_name,call._name);
1216 }
1217 #ifndef PRODUCT
1218 void CallRuntimeNode::dump_spec(outputStream *st) const {
1219 st->print("# ");
1220 st->print("%s", _name);
1221 CallNode::dump_spec(st);
1222 }
1223 #endif
1224 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1225 bool CallLeafVectorNode::cmp( const Node &n ) const {
1226 CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1227 return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1228 }
1229
1230 //------------------------------calling_convention-----------------------------
1231 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1232 SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
1233 }
1234
1235 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1236 #ifdef ASSERT
1237 assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1238 "return vector size must match");
1239 const TypeTuple* d = tf()->domain();
1240 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1241 Node* arg = in(i);
1242 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1243 "vector argument size must match");
1244 }
1245 #endif
1246
1247 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1248 }
1249
1250 //=============================================================================
1251 //------------------------------calling_convention-----------------------------
1252
1253
1254 //=============================================================================
1255 #ifndef PRODUCT
1256 void CallLeafNode::dump_spec(outputStream *st) const {
1257 st->print("# ");
1258 st->print("%s", _name);
1259 CallNode::dump_spec(st);
1260 }
1261 #endif
1262
1263 //=============================================================================
1264
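// Overwrite local slot 'idx' of the frame described by 'jvms'. If the slot
// currently holds the second half of a long/double stored in the previous slot,
// that long/double is killed first.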
1265 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1266 assert(verify_jvms(jvms), "jvms must match");
1267 int loc = jvms->locoff() + idx;
1268 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1269 // If current local idx is top then local idx - 1 could
1270 // be a long/double that needs to be killed since top could
1271 // represent the 2nd half of the long/double.
1272 uint ideal = in(loc -1)->ideal_reg();
1273 if (ideal == Op_RegD || ideal == Op_RegL) {
1274 // set other (low index) half to top
1275 set_req(loc - 1, in(loc));
1276 }
1277 }
1278 set_req(loc, c);
1279 }
1280
1281 uint SafePointNode::size_of() const { return sizeof(*this); }
1282 bool SafePointNode::cmp( const Node &n ) const {
1293 }
1294 }
1295
1296
1297 //----------------------------next_exception-----------------------------------
1298 SafePointNode* SafePointNode::next_exception() const {
1299 if (len() == req()) {
1300 return NULL;
1301 } else {
1302 Node* n = in(req());
1303 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1304 return (SafePointNode*) n;
1305 }
1306 }
1307
1308
1309 //------------------------------Ideal------------------------------------------
1310 // Skip over any collapsed Regions
1311 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1312 assert(_jvms == NULL || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1313 return remove_dead_region(phase, can_reshape) ? this : NULL;
1314 }
1315
1316 //------------------------------Identity---------------------------------------
1317 // Remove obviously duplicate safepoints
1318 Node* SafePointNode::Identity(PhaseGVN* phase) {
1319
1320 // If you have back-to-back safepoints, remove one
1321 if (in(TypeFunc::Control)->is_SafePoint()) {
1322 Node* out_c = unique_ctrl_out_or_null();
1323 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1324 // outer loop's safepoint could confuse removal of the outer loop.
1325 if (out_c != NULL && !out_c->is_OuterStripMinedLoopEnd()) {
1326 return in(TypeFunc::Control);
1327 }
1328 }
1329
1330 // Transforming long counted loops requires a safepoint node. Do not
1331 // eliminate a safepoint until loop opts are over.
1332 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1333 Node *n0 = in(0)->in(0);
1457 igvn->delete_precedence_of(igvn->C->root(), nb);
1458 }
1459 }
1460
1461 //============== SafePointScalarObjectNode ==============
1462
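// A SafePointScalarObjectNode describes a scalar-replaced object in a safepoint's
// debug info: it records where the object's field values start among the
// safepoint's inputs and how many fields there are, so deoptimization can
// reallocate the object.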
1463 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1464 #ifdef ASSERT
1465 Node* alloc,
1466 #endif
1467 uint first_index,
1468 uint n_fields) :
1469 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1470 _first_index(first_index),
1471 _n_fields(n_fields)
1472 #ifdef ASSERT
1473 , _alloc(alloc)
1474 #endif
1475 {
1476 #ifdef ASSERT
1477 if (!alloc->is_Allocate()
1478 && !(alloc->Opcode() == Op_VectorBox)) {
1479 alloc->dump();
1480 assert(false, "unexpected call node");
1481 }
1482 #endif
1483 init_class_id(Class_SafePointScalarObject);
1484 }
1485
1486 // Do not allow value-numbering for SafePointScalarObject node.
1487 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1488 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1489 return (&n == this); // Always fail except on self
1490 }
1491
1492 uint SafePointScalarObjectNode::ideal_reg() const {
1493 return 0; // No matching to machine instruction
1494 }
1495
1496 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1497 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1515 new_node = true;
1516 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1517 sosn_map->Insert((void*)this, (void*)res);
1518 return res;
1519 }
1520
1521
1522 #ifndef PRODUCT
1523 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1524 st->print(" # fields@[%d..%d]", first_index(),
1525 first_index() + n_fields() - 1);
1526 }
1527
1528 #endif
1529
1530 //=============================================================================
1531 uint AllocateNode::size_of() const { return sizeof(*this); }
1532
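// Allocations are macro nodes: they stay in this abstract form until macro
// expansion turns them into the actual fast/slow path code. Unused inputs
// (ReturnAdr, FramePtr, array length for non-array allocations) are filled with top.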
1533 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1534 Node *ctrl, Node *mem, Node *abio,
1535 Node *size, Node *klass_node, Node *initial_test)
1536 : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1537 {
1538 init_class_id(Class_Allocate);
1539 init_flags(Flag_is_macro);
1540 _is_scalar_replaceable = false;
1541 _is_non_escaping = false;
1542 _is_allocation_MemBar_redundant = false;
1543 Node *topnode = C->top();
1544
1545 init_req( TypeFunc::Control , ctrl );
1546 init_req( TypeFunc::I_O , abio );
1547 init_req( TypeFunc::Memory , mem );
1548 init_req( TypeFunc::ReturnAdr, topnode );
1549 init_req( TypeFunc::FramePtr , topnode );
1550 init_req( AllocSize , size);
1551 init_req( KlassNode , klass_node);
1552 init_req( InitialTest , initial_test);
1553 init_req( ALength , topnode);
1554 init_req( ValidLengthTest , topnode);
1555 C->add_macro_node(this);
1556 }
1557
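// The MemBarStoreStore emitted after initialization is redundant if escape
// analysis of the constructor shows that the newly allocated receiver
// (argument 0) does not escape it.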
1558 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1559 {
1560 assert(initializer != NULL &&
1561 initializer->is_initializer() &&
1562 !initializer->is_static(),
1563 "unexpected initializer method");
1564 BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1565 if (analyzer == NULL) {
1566 return;
1567 }
1568
1569 // The allocated object is the first parameter (receiver) of its initializer
1570 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1571 _is_allocation_MemBar_redundant = true;
1572 }
1573 }
1574 Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
1575 Node* mark_node = NULL;
1576 // For now only enable fast locking for non-array types
1577 mark_node = phase->MakeConX(markWord::prototype().value());
1578 return mark_node;
1579 }
1580
1581 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1582 // CastII, if appropriate. If we are not allowed to create new nodes, and
1583 // a CastII is appropriate, return NULL.
1584 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1585 Node *length = in(AllocateNode::ALength);
1586 assert(length != NULL, "length is not null");
1587
1588 const TypeInt* length_type = phase->find_int_type(length);
1589 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1590
1591 if (ary_type != NULL && length_type != NULL) {
1592 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1593 if (narrow_length_type != length_type) {
1594 // Assert one of:
1595 // - the narrow_length is 0
1596 // - the narrow_length is not wider than length
1597 assert(narrow_length_type == TypeInt::ZERO ||
1598 length_type->is_con() && narrow_length_type->is_con() &&
1937
1938 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
1939 st->print("%s", _kind_names[_kind]);
1940 }
1941 #endif
1942
1943 //=============================================================================
1944 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1945
1946 // perform any generic optimizations first (returns 'this' or NULL)
1947 Node *result = SafePointNode::Ideal(phase, can_reshape);
1948 if (result != NULL) return result;
1949 // Don't bother trying to transform a dead node
1950 if (in(0) && in(0)->is_top()) return NULL;
1951
1952 // Now see if we can optimize away this lock. We don't actually
1953 // remove the locking here, we simply set the _eliminate flag which
1954 // prevents macro expansion from expanding the lock. Since we don't
1955 // modify the graph, the value returned from this function is the
1956 // one computed above.
1957 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1958 //
1959 // If we are locking a non-escaped object, the lock/unlock is unnecessary
1960 //
1961 ConnectionGraph *cgr = phase->C->congraph();
1962 if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1963 assert(!is_eliminated() || is_coarsened(), "sanity");
1964 // The lock could be marked eliminated by the lock coarsening
1965 // code during the first IGVN pass, before EA. Replace the coarsened flag
1966 // so that all associated locks/unlocks are eliminated.
1967 #ifdef ASSERT
1968 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
1969 #endif
1970 this->set_non_esc_obj();
1971 return result;
1972 }
1973
1974 if (!phase->C->do_locks_coarsening()) {
1975 return result; // Compiling without locks coarsening
1976 }
1977 //
2133 }
2134
2135 //=============================================================================
2136 uint UnlockNode::size_of() const { return sizeof(*this); }
2137
2138 //=============================================================================
2139 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2140
2141 // perform any generic optimizations first (returns 'this' or NULL)
2142 Node *result = SafePointNode::Ideal(phase, can_reshape);
2143 if (result != NULL) return result;
2144 // Don't bother trying to transform a dead node
2145 if (in(0) && in(0)->is_top()) return NULL;
2146
2147 // Now see if we can optimize away this unlock. We don't actually
2148 // remove the unlocking here, we simply set the _eliminate flag which
2149 // prevents macro expansion from expanding the unlock. Since we don't
2150 // modify the graph, the value returned from this function is the
2151 // one computed above.
2152 // Escape state is defined after Parse phase.
2153 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
2154 //
2155 // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2156 //
2157 ConnectionGraph *cgr = phase->C->congraph();
2158 if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2159 assert(!is_eliminated() || is_coarsened(), "sanity");
2160 // The lock could be marked eliminated by the lock coarsening
2161 // code during the first IGVN pass, before EA. Replace the coarsened flag
2162 // so that all associated locks/unlocks are eliminated.
2163 #ifdef ASSERT
2164 this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2165 #endif
2166 this->set_non_esc_obj();
2167 }
2168 }
2169 return result;
2170 }
2171
2172 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2173 if (C == NULL) {
2213 }
2214 // unrelated
2215 return false;
2216 }
2217
2218 if (dest_t->isa_aryptr()) {
2219 // arraycopy or array clone
2220 if (t_oop->isa_instptr()) {
2221 return false;
2222 }
2223 if (!t_oop->isa_aryptr()) {
2224 return true;
2225 }
2226
2227 const Type* elem = dest_t->is_aryptr()->elem();
2228 if (elem == Type::BOTTOM) {
2229 // An array, but we don't know what the elements are
2230 return true;
2231 }
2232
2233 dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
2234 uint dest_alias = phase->C->get_alias_index(dest_t);
2235 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2236
2237 return dest_alias == t_oop_alias;
2238 }
2239
2240 return true;
2241 }
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "ci/ciFlatArrayKlass.hpp"
28 #include "ci/bcEscapeAnalyzer.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/c2/barrierSetC2.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "opto/callGenerator.hpp"
34 #include "opto/callnode.hpp"
35 #include "opto/castnode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/escape.hpp"
38 #include "opto/inlinetypenode.hpp"
39 #include "opto/locknode.hpp"
40 #include "opto/machnode.hpp"
41 #include "opto/matcher.hpp"
42 #include "opto/parse.hpp"
43 #include "opto/regalloc.hpp"
44 #include "opto/regmask.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "runtime/stubRoutines.hpp"
49 #include "utilities/powerOfTwo.hpp"
50 #include "code/vmreg.hpp"
51
52 // Portions of code courtesy of Clifford Click
53
54 // Optimization - Graph Style
55
56 //=============================================================================
57 uint StartNode::size_of() const { return sizeof(*this); }
58 bool StartNode::cmp( const Node &n ) const
59 { return _domain == ((StartNode&)n)._domain; }
60 const Type *StartNode::bottom_type() const { return _domain; }
61 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
62 #ifndef PRODUCT
63 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
64 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
65 #endif
66
67 //------------------------------Ideal------------------------------------------
68 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
69 return remove_dead_region(phase, can_reshape) ? this : NULL;
70 }
71
72 //------------------------------calling_convention-----------------------------
73 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
74 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
75 }
76
77 //------------------------------Registers--------------------------------------
78 const RegMask &StartNode::in_RegMask(uint) const {
79 return RegMask::Empty;
80 }
81
82 //------------------------------match------------------------------------------
83 // Construct projections for incoming parameters, and their RegMask info
84 Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
85 switch (proj->_con) {
86 case TypeFunc::Control:
87 case TypeFunc::I_O:
88 case TypeFunc::Memory:
89 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
90 case TypeFunc::FramePtr:
91 return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
92 case TypeFunc::ReturnAdr:
93 return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
94 case TypeFunc::Parms:
95 default: {
96 uint parm_num = proj->_con - TypeFunc::Parms;
97 const Type *t = _domain->field_at(proj->_con);
98 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
99 return new ConNode(Type::TOP);
100 uint ideal_reg = t->ideal_reg();
101 RegMask &rm = match->_calling_convention_mask[parm_num];
102 return new MachProjNode(this,proj->_con,rm,ideal_reg);
103 }
104 }
105 return NULL;
106 }
107
108 //=============================================================================
109 const char * const ParmNode::names[TypeFunc::Parms+1] = {
110 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
111 };
112
113 #ifndef PRODUCT
114 void ParmNode::dump_spec(outputStream *st) const {
115 if( _con < TypeFunc::Parms ) {
116 st->print("%s", names[_con]);
117 } else {
118 st->print("Parm%d: ",_con-TypeFunc::Parms);
119 // Verbose and WizardMode dump bottom_type for all nodes
120 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
121 }
122 }
123
124 void ParmNode::dump_compact_spec(outputStream *st) const {
125 if (_con < TypeFunc::Parms) {
126 st->print("%s", names[_con]);
127 } else {
473 if (cik->is_instance_klass()) {
474 cik->print_name_on(st);
475 iklass = cik->as_instance_klass();
476 } else if (cik->is_type_array_klass()) {
477 cik->as_array_klass()->base_element_type()->print_name_on(st);
478 st->print("[%d]", spobj->n_fields());
479 } else if (cik->is_obj_array_klass()) {
480 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
481 if (cie->is_instance_klass()) {
482 cie->print_name_on(st);
483 } else if (cie->is_type_array_klass()) {
484 cie->as_array_klass()->base_element_type()->print_name_on(st);
485 } else {
486 ShouldNotReachHere();
487 }
488 st->print("[%d]", spobj->n_fields());
489 int ndim = cik->as_array_klass()->dimension() - 1;
490 while (ndim-- > 0) {
491 st->print("[]");
492 }
493 } else if (cik->is_flat_array_klass()) {
494 ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
495 cie->print_name_on(st);
496 st->print("[%d]", spobj->n_fields());
497 int ndim = cik->as_array_klass()->dimension() - 1;
498 while (ndim-- > 0) {
499 st->print("[]");
500 }
501 }
502 st->print("={");
503 uint nf = spobj->n_fields();
504 if (nf > 0) {
505 uint first_ind = spobj->first_index(mcall->jvms());
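// For a scalar-replaced inline type the first recorded input is the is_init
// flag; print it ahead of the field values.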
506 if (iklass != NULL && iklass->is_inlinetype()) {
507 Node* init_node = mcall->in(first_ind++);
508 if (!init_node->is_top()) {
509 st->print(" [is_init");
510 format_helper(regalloc, st, init_node, ":", -1, NULL);
511 }
512 }
513 Node* fld_node = mcall->in(first_ind);
514 ciField* cifield;
515 if (iklass != NULL) {
516 st->print(" [");
517 cifield = iklass->nonstatic_field_at(0);
518 cifield->print_name_on(st);
519 format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
520 } else {
521 format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
522 }
523 for (uint j = 1; j < nf; j++) {
524 fld_node = mcall->in(first_ind+j);
525 if (iklass != NULL) {
526 st->print(", [");
527 cifield = iklass->nonstatic_field_at(j);
528 cifield->print_name_on(st);
529 format_helper(regalloc, st, fld_node, ":", j, &scobjs);
530 } else {
531 format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
532 }
706 if (i == TypeFunc::Parms) st->print("(");
707 Node* p = in(i);
708 if (p != nullptr) {
709 p->dump_idx(false, st, dc);
710 st->print(" ");
711 } else {
712 st->print("_ ");
713 }
714 }
715 st->print(")");
716 }
717
718 void CallNode::dump_spec(outputStream *st) const {
719 st->print(" ");
720 if (tf() != NULL) tf()->dump_on(st);
721 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
722 if (jvms() != NULL) jvms()->dump_spec(st);
723 }
724 #endif
725
726 const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
727 const Type* CallNode::Value(PhaseGVN* phase) const {
728 if (!in(0) || phase->type(in(0)) == Type::TOP) {
729 return Type::TOP;
730 }
731 return tf()->range_cc();
732 }
733
734 //------------------------------calling_convention-----------------------------
735 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
736 if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
737 // The call to that stub is a special case: its inputs are
738 // multiple values returned from a call and so it should follow
739 // the return convention.
740 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
741 return;
742 }
743 // Use the standard compiler calling convention
744 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
745 }
746
747
748 //------------------------------match------------------------------------------
749 // Construct projections for control, I/O, memory-fields, ..., and
750 // return result(s) along with their RegMask info
751 Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
752 uint con = proj->_con;
753 const TypeTuple* range_cc = tf()->range_cc();
754 if (con >= TypeFunc::Parms) {
755 if (tf()->returns_inline_type_as_fields()) {
756 // The call returns multiple values (inline type fields): we
757 // create one projection per returned value.
758 assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
759 uint ideal_reg = range_cc->field_at(con)->ideal_reg();
760 return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
761 } else {
762 if (con == TypeFunc::Parms) {
763 uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
764 OptoRegPair regs = Opcode() == Op_CallLeafVector
765 ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine
766 : match->c_return_value(ideal_reg);
767 RegMask rm = RegMask(regs.first());
768
769 if (Opcode() == Op_CallLeafVector) {
770 // If the return is in vector, compute appropriate regmask taking into account the whole range
771 if (ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
772 if (OptoReg::is_valid(regs.second())) {
773 for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
774 rm.Insert(r);
775 }
776 }
777 }
778 }
779
780 if (OptoReg::is_valid(regs.second())) {
781 rm.Insert(regs.second());
782 }
783 return new MachProjNode(this,con,rm,ideal_reg);
784 } else {
785 assert(con == TypeFunc::Parms+1, "only one return value");
786 assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
787 return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
788 }
789 }
790 }
791
792 switch (con) {
793 case TypeFunc::Control:
794 case TypeFunc::I_O:
795 case TypeFunc::Memory:
796 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
797
798 case TypeFunc::ReturnAdr:
799 case TypeFunc::FramePtr:
800 default:
801 ShouldNotReachHere();
802 }
803 return NULL;
804 }
805
806 // Do we Match on this edge index or not? Match no edges
807 uint CallNode::match_edge(uint idx) const {
808 return 0;
809 }
810
811 //
812 // Determine whether the call could modify the field of the specified
813 // instance at the specified offset.
814 //
815 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
816 assert((t_oop != NULL), "sanity");
817 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
818 const TypeTuple* args = _tf->domain_sig();
819 Node* dest = NULL;
820 // Stubs that can be called once an ArrayCopyNode is expanded have
821 // different signatures. Look for the second pointer argument,
822 // that is the destination of the copy.
823 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
824 if (args->field_at(i)->isa_ptr()) {
825 j++;
826 if (j == 2) {
827 dest = in(i);
828 break;
829 }
830 }
831 }
832 guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
833 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
834 return true;
835 }
836 return false;
837 }
838 if (t_oop->is_known_instance()) {
847 Node* proj = proj_out_or_null(TypeFunc::Parms);
848 if ((proj == NULL) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
849 return false;
850 }
851 }
852 if (is_CallJava() && as_CallJava()->method() != NULL) {
853 ciMethod* meth = as_CallJava()->method();
854 if (meth->is_getter()) {
855 return false;
856 }
857 // May modify (by reflection) if a boxing object is passed
858 // as an argument or returned.
859 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
860 if (proj != NULL) {
861 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
862 if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
863 (inst_t->instance_klass() == boxing_klass))) {
864 return true;
865 }
866 }
867 const TypeTuple* d = tf()->domain_cc();
868 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
869 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
870 if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
871 (inst_t->instance_klass() == boxing_klass))) {
872 return true;
873 }
874 }
875 return false;
876 }
877 }
878 return true;
879 }
880
881 // Does this call have a direct reference to n other than debug information?
882 bool CallNode::has_non_debug_use(Node* n) {
883 const TypeTuple* d = tf()->domain_cc();
884 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
885 if (in(i) == n) {
886 return true;
887 }
888 }
889 return false;
890 }
891
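// Does the debug information (JVMState) of this call reference n?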
892 bool CallNode::has_debug_use(Node* n) {
893 if (jvms() != NULL) {
894 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
895 if (in(i) == n) {
896 return true;
897 }
898 }
899 }
900 return false;
901 }
902
903 // Returns the unique CheckCastPP of a call,
904 // or 'this' if there are several CheckCastPPs or unexpected uses,
905 // or NULL if there is none.
906 Node *CallNode::result_cast() {
907 Node *cast = NULL;
908
909 Node *p = proj_out_or_null(TypeFunc::Parms);
910 if (p == NULL)
911 return NULL;
912
913 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
914 Node *use = p->fast_out(i);
915 if (use->is_CheckCastPP()) {
916 if (cast != NULL) {
917 return this; // more than 1 CheckCastPP
918 }
919 cast = use;
920 } else if (!use->is_Initialize() &&
921 !use->is_AddP() &&
922 use->Opcode() != Op_MemBarStoreStore) {
923 // Expected uses are restricted to a CheckCastPP, an Initialize
924 // node, a MemBarStoreStore (clone) and AddP nodes. If we
925 // encounter any other use (a Phi node can be seen in rare
926 // cases) return this to prevent incorrect optimizations.
927 return this;
928 }
929 }
930 return cast;
931 }
932
933
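// Collect this call's projections into a resource-allocated CallProjections
// record sized for one result projection per returned value (a call may return
// multiple values when an inline type is returned as fields).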
934 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
935 uint max_res = TypeFunc::Parms-1;
936 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
937 ProjNode *pn = fast_out(i)->as_Proj();
938 max_res = MAX2(max_res, pn->_con);
939 }
940
941 assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
942
943 uint projs_size = sizeof(CallProjections);
944 if (max_res > TypeFunc::Parms) {
945 projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
946 }
947 char* projs_storage = resource_allocate_bytes(projs_size);
948 CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
949
950 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
951 ProjNode *pn = fast_out(i)->as_Proj();
952 if (pn->outcnt() == 0) continue;
953 switch (pn->_con) {
954 case TypeFunc::Control:
955 {
956 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
957 projs->fallthrough_proj = pn;
958 const Node* cn = pn->unique_ctrl_out_or_null();
959 if (cn != NULL && cn->is_Catch()) {
960 ProjNode *cpn = NULL;
961 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
962 cpn = cn->fast_out(k)->as_Proj();
963 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
964 if (cpn->_con == CatchProjNode::fall_through_index)
965 projs->fallthrough_catchproj = cpn;
966 else {
967 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
968 projs->catchall_catchproj = cpn;
974 case TypeFunc::I_O:
975 if (pn->_is_io_use)
976 projs->catchall_ioproj = pn;
977 else
978 projs->fallthrough_ioproj = pn;
979 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
980 Node* e = pn->out(j);
981 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
982 assert(projs->exobj == NULL, "only one");
983 projs->exobj = e;
984 }
985 }
986 break;
987 case TypeFunc::Memory:
988 if (pn->_is_io_use)
989 projs->catchall_memproj = pn;
990 else
991 projs->fallthrough_memproj = pn;
992 break;
993 case TypeFunc::Parms:
994 projs->resproj[0] = pn;
995 break;
996 default:
997 assert(pn->_con <= max_res, "unexpected projection from allocation node.");
998 projs->resproj[pn->_con-TypeFunc::Parms] = pn;
999 break;
1000 }
1001 }
1002
1003 // The resproj may not exist because the result could be ignored
1004 // and the exception object may not exist if an exception handler
1005 // swallows the exception, but all the others must exist and be found.
1006 do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
1007 assert(!do_asserts || projs->fallthrough_proj != NULL, "must be found");
1008 assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
1009 assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found");
1010 assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found");
1011 assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found");
1012 if (separate_io_proj) {
1013 assert(!do_asserts || projs->catchall_memproj != NULL, "must be found");
1014 assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found");
1015 }
1016 return projs;
1017 }
1018
1019 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1020 #ifdef ASSERT
1021 // Validate attached generator
1022 CallGenerator* cg = generator();
1023 if (cg != NULL) {
1024 assert(is_CallStaticJava() && cg->is_mh_late_inline() ||
1025 is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
1026 }
1027 #endif // ASSERT
1028 return SafePointNode::Ideal(phase, can_reshape);
1029 }
1030
1031 bool CallNode::is_call_to_arraycopystub() const {
1032 if (_name != NULL && strstr(_name, "arraycopy") != 0) {
1033 return true;
1034 }
1035 return false;
1036 }
1037
1038 //=============================================================================
1039 uint CallJavaNode::size_of() const { return sizeof(*this); }
1040 bool CallJavaNode::cmp( const Node &n ) const {
1041 CallJavaNode &call = (CallJavaNode&)n;
1042 return CallNode::cmp(call) && _method == call._method &&
1043 _override_symbolic_info == call._override_symbolic_info;
1044 }
1045
1046 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1047 // Copy debug information and adjust JVMState information
1048 uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
1049 uint new_dbg_start = tf()->domain_sig()->cnt();
1050 int jvms_adj = new_dbg_start - old_dbg_start;
1051 assert (new_dbg_start == req(), "argument count mismatch");
1052 Compile* C = phase->C;
1053
1054 // SafePointScalarObject node could be referenced several times in debug info.
1055 // Use Dict to record cloned nodes.
1056 Dict* sosn_map = new Dict(cmpkey,hashkey);
1057 for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1058 Node* old_in = sfpt->in(i);
1059 // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1060 if (old_in != NULL && old_in->is_SafePointScalarObject()) {
1061 SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1062 bool new_node;
1063 Node* new_in = old_sosn->clone(sosn_map, new_node);
1064 if (new_node) { // New node?
1065 new_in->set_req(0, C->root()); // reset control edge
1066 new_in = phase->transform(new_in); // Register new node.
1067 }
1068 old_in = new_in;
1069 }
1070 add_req(old_in);
1071 }
1072
1073 // JVMS may be shared so clone it before we modify it
1074 set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL);
1075 for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1076 jvms->set_map(this);
1077 jvms->set_locoff(jvms->locoff()+jvms_adj);
1078 jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1079 jvms->set_monoff(jvms->monoff()+jvms_adj);
1080 jvms->set_scloff(jvms->scloff()+jvms_adj);
1081 jvms->set_endoff(jvms->endoff()+jvms_adj);
1082 }
1083 }
1084
1085 #ifdef ASSERT
1086 bool CallJavaNode::validate_symbolic_info() const {
1087 if (method() == NULL) {
1088 return true; // call into runtime or uncommon trap
1089 }
1090 Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
1091 if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
1092 return true;
1093 }
1094 ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1095 ciMethod* callee = method();
1096 if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1097 assert(override_symbolic_info(), "should be set");
1098 }
1099 assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1100 return true;
1101 }
1102 #endif
1103
1104 #ifndef PRODUCT
1105 void CallJavaNode::dump_spec(outputStream* st) const {
1106 if( _method ) _method->print_short_name(st);
1107 CallNode::dump_spec(st);
1108 }
1109
1110 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1111 if (_method) {
1112 _method->print_short_name(st);
1113 } else {
1114 st->print("<?>");
1115 }
1116 }
1117 #endif
1118
1119 //=============================================================================
1120 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1121 bool CallStaticJavaNode::cmp( const Node &n ) const {
1122 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1123 return CallJavaNode::cmp(call);
1124 }
1125
1126 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1127 if (can_reshape && uncommon_trap_request() != 0) {
1128 if (remove_useless_allocation(phase, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
1129 if (!in(0)->is_Region()) {
1130 PhaseIterGVN* igvn = phase->is_IterGVN();
1131 igvn->replace_input_of(this, 0, phase->C->top());
1132 }
1133 return this;
1134 }
1135 }
1136
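// If a method handle late-inline CallGenerator is still attached, re-queue
// the call for late inlining as soon as the relevant argument (the receiver
// MethodHandle for invokeBasic, the trailing MemberName for linkTo*) has
// become a constant.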
1137 CallGenerator* cg = generator();
1138 if (can_reshape && cg != NULL) {
1139 assert(IncrementalInlineMH, "required");
1140 assert(cg->call_node() == this, "mismatch");
1141 assert(cg->is_mh_late_inline(), "not virtual");
1142
1143 // Check whether this MH call has become a candidate for late inlining.
1144 ciMethod* callee = cg->method();
1145 vmIntrinsics::ID iid = callee->intrinsic_id();
1146 if (iid == vmIntrinsics::_invokeBasic) {
1147 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1148 phase->C->prepend_late_inline(cg);
1149 set_generator(NULL);
1150 }
1151 } else if (iid == vmIntrinsics::_linkToNative) {
1152 // never retry
1153 } else {
1154 assert(callee->has_member_arg(), "wrong type of call?");
1155 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1156 phase->C->prepend_late_inline(cg);
1166 int CallStaticJavaNode::uncommon_trap_request() const {
1167 if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
1168 return extract_uncommon_trap_request(this);
1169 }
1170 return 0;
1171 }
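// Decode the trap request from the constant int passed as the first
// parameter of an uncommon_trap call.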
1172 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1173 #ifndef PRODUCT
1174 if (!(call->req() > TypeFunc::Parms &&
1175 call->in(TypeFunc::Parms) != NULL &&
1176 call->in(TypeFunc::Parms)->is_Con() &&
1177 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1178 assert(in_dump() != 0, "OK if dumping");
1179 tty->print("[bad uncommon trap]");
1180 return 0;
1181 }
1182 #endif
1183 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1184 }
1185
1186 bool CallStaticJavaNode::remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg) {
1188 // Split-if can cause the flattened array branch of an array load to
1189 // end in an uncommon trap. In that case, the allocation of the
1190 // loaded value and its initialization are useless. Eliminate them and use
1191 // the JVM state of the allocation to create a new uncommon trap
1192 // call at the load.
1192 if (ctl == NULL || ctl->is_top() || mem == NULL || mem->is_top() || !mem->is_MergeMem()) {
1193 return false;
1194 }
1195 PhaseIterGVN* igvn = phase->is_IterGVN();
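// When control is a Region, handle each incoming path separately: split the
// memory state through any memory Phi hanging off the Region and recurse on
// each path.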
1196 if (ctl->is_Region()) {
1197 bool res = false;
1198 for (uint i = 1; i < ctl->req(); i++) {
1199 MergeMemNode* mm = mem->clone()->as_MergeMem();
1200 for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
1201 Node* m = mms.memory();
1202 if (m->is_Phi() && m->in(0) == ctl) {
1203 mms.set_memory(m->in(i));
1204 }
1205 }
1206 if (remove_useless_allocation(phase, ctl->in(i), mm, unc_arg)) {
1207 res = true;
1208 if (!ctl->in(i)->is_Region()) {
1209 igvn->replace_input_of(ctl, i, phase->C->top());
1210 }
1211 }
1212 igvn->remove_dead_node(mm);
1213 }
1214 return res;
1215 }
1216 // Verify the control flow: walk up through Proj/Catch/MemBar nodes to the load_unknown_inline call
1217 Node* call = ctl;
1218 MemBarNode* membar = NULL;
1219 for (;;) {
1220 if (call == NULL || call->is_top()) {
1221 return false;
1222 }
1223 if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
1224 call = call->in(0);
1225 } else if (call->Opcode() == Op_CallStaticJava &&
1226 call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1227 assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
1228 membar = call->in(0)->in(0)->as_MemBar();
1229 break;
1230 } else {
1231 return false;
1232 }
1233 }
1234
1235 JVMState* jvms = call->jvms();
1236 if (phase->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
1237 return false;
1238 }
1239
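// Memory state at the load_unknown_inline call (which performs the
// allocation); wrap it in a MergeMem so it can be compared slice by slice
// against the memory state seen at the uncommon trap.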
1240 Node* alloc_mem = call->in(TypeFunc::Memory);
1241 if (alloc_mem == NULL || alloc_mem->is_top()) {
1242 return false;
1243 }
1244 if (!alloc_mem->is_MergeMem()) {
1245 alloc_mem = MergeMemNode::make(alloc_mem);
1246 igvn->register_new_node_with_optimizer(alloc_mem);
1247 }
1248
1249 // and check that there is no unexpected memory side effect between the load call and this point
1250 for (MergeMemStream mms2(mem->as_MergeMem(), alloc_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
1251 Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
1252 Node* m2 = mms2.memory2();
1253
1254 for (uint i = 0; i < 100; i++) {
1255 if (m1 == m2) {
1256 break;
1257 } else if (m1->is_Proj()) {
1258 m1 = m1->in(0);
1259 } else if (m1->is_MemBar()) {
1260 m1 = m1->in(TypeFunc::Memory);
1261 } else if (m1->Opcode() == Op_CallStaticJava &&
1262 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1263 if (m1 != call) {
1264 return false;
1265 }
1266 break;
1267 } else if (m1->is_MergeMem()) {
1268 MergeMemNode* mm = m1->as_MergeMem();
1269 int idx = mms2.alias_idx();
1270 if (idx == Compile::AliasIdxBot) {
1271 m1 = mm->base_memory();
1272 } else {
1273 m1 = mm->memory_at(idx);
1274 }
1275 } else {
1276 return false;
1277 }
1278 }
1279 }
1280 if (alloc_mem->outcnt() == 0) {
1281 igvn->remove_dead_node(alloc_mem);
1282 }
1283
1284 // Remove membar preceding the call
1285 membar->remove(igvn);
1286
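// Replace the load_unknown_inline call by an uncommon trap call that reuses
// the call's fixed inputs and, via copy_call_debug_info(), its debug state.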
1287 address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
1288 CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", NULL);
1289 unc->init_req(TypeFunc::Control, call->in(0));
1290 unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
1291 unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
1292 unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
1293 unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
1294 unc->init_req(TypeFunc::Parms+0, unc_arg);
1295 unc->set_cnt(PROB_UNLIKELY_MAG(4));
1296 unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
1297
1298 igvn->replace_input_of(call, 0, phase->C->top());
1299
1300 igvn->register_new_node_with_optimizer(unc);
1301
1302 Node* ctrl = phase->transform(new ProjNode(unc, TypeFunc::Control));
1303 Node* halt = phase->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
1304 igvn->add_input_to(phase->C->root(), halt);
1305
1306 return true;
1307 }
1308
1309
1310 #ifndef PRODUCT
1311 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1312 st->print("# Static ");
1313 if (_name != NULL) {
1314 st->print("%s", _name);
1315 int trap_req = uncommon_trap_request();
1316 if (trap_req != 0) {
1317 char buf[100];
1318 st->print("(%s)",
1319 Deoptimization::format_trap_request(buf, sizeof(buf),
1320 trap_req));
1321 }
1322 st->print(" ");
1323 }
1324 CallJavaNode::dump_spec(st);
1325 }
1326
1327 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1328 if (_method) {
1329 _method->print_short_name(st);
1394 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1395 bool CallRuntimeNode::cmp( const Node &n ) const {
1396 CallRuntimeNode &call = (CallRuntimeNode&)n;
1397 return CallNode::cmp(call) && !strcmp(_name,call._name);
1398 }
1399 #ifndef PRODUCT
1400 void CallRuntimeNode::dump_spec(outputStream *st) const {
1401 st->print("# ");
1402 st->print("%s", _name);
1403 CallNode::dump_spec(st);
1404 }
1405 #endif
1406 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1407 bool CallLeafVectorNode::cmp( const Node &n ) const {
1408 CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1409 return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1410 }
1411
1412 //------------------------------calling_convention-----------------------------
1413 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1414 if (_entry_point == NULL) {
1415 // A call with a NULL entry point is a special case: its inputs are the
1416 // multiple values returned from another call, so they follow the Java
1417 // return convention rather than the C calling convention.
1418 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1419 return;
1420 }
1421 SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
1422 }
1423
1424 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1425 #ifdef ASSERT
1426 assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1427 "return vector size must match");
1428 const TypeTuple* d = tf()->domain_sig();
1429 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1430 Node* arg = in(i);
1431 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1432 "vector argument size must match");
1433 }
1434 #endif
1435
1436 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1437 }
1438
1439 //=============================================================================
1440 //------------------------------calling_convention-----------------------------
1441
1442
1443 //=============================================================================
1444 #ifndef PRODUCT
1445 void CallLeafNode::dump_spec(outputStream *st) const {
1446 st->print("# ");
1447 st->print("%s", _name);
1448 CallNode::dump_spec(st);
1449 }
1450 #endif
1451
1452 uint CallLeafNoFPNode::match_edge(uint idx) const {
1453 // Null entry point is a special case for which the target is in a
1454 // register. Need to match that edge.
1455 return entry_point() == NULL && idx == TypeFunc::Parms;
1456 }
1457
1458 //=============================================================================
1459
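// Set local number 'idx' in this safepoint's JVMState to 'c'. If the slot
// currently holds top, the previous slot may hold a long/double whose second
// half that top represented, so the previous slot is killed as well.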
1460 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1461 assert(verify_jvms(jvms), "jvms must match");
1462 int loc = jvms->locoff() + idx;
1463 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1464 // If current local idx is top then local idx - 1 could
1465 // be a long/double that needs to be killed since top could
1466 // represent the 2nd half of the long/double.
1467 uint ideal = in(loc -1)->ideal_reg();
1468 if (ideal == Op_RegD || ideal == Op_RegL) {
1469 // set other (low index) half to top
1470 set_req(loc - 1, in(loc));
1471 }
1472 }
1473 set_req(loc, c);
1474 }
1475
1476 uint SafePointNode::size_of() const { return sizeof(*this); }
1477 bool SafePointNode::cmp( const Node &n ) const {
1488 }
1489 }
1490
1491
1492 //----------------------------next_exception-----------------------------------
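// Exception states are chained to a SafePointNode through an extra
// precedence edge beyond req(); return the next one in the chain, or NULL
// if there is none.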
1493 SafePointNode* SafePointNode::next_exception() const {
1494 if (len() == req()) {
1495 return NULL;
1496 } else {
1497 Node* n = in(req());
1498 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1499 return (SafePointNode*) n;
1500 }
1501 }
1502
1503
1504 //------------------------------Ideal------------------------------------------
1505 // Skip over any collapsed Regions
1506 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1507 assert(_jvms == NULL || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1508 if (remove_dead_region(phase, can_reshape)) {
1509 return this;
1510 }
1511 // Scalarize inline types in safepoint debug info.
1512 // Delay this until all inlining is over to avoid getting inconsistent debug info.
1513 if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != NULL) {
1514 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
1515 Node* n = in(i)->uncast();
1516 if (n->is_InlineType()) {
1517 n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
1518 }
1519 }
1520 }
1521 return NULL;
1522 }
1523
1524 //------------------------------Identity---------------------------------------
1525 // Remove obviously duplicate safepoints
1526 Node* SafePointNode::Identity(PhaseGVN* phase) {
1527
1528 // If you have back to back safepoints, remove one
1529 if (in(TypeFunc::Control)->is_SafePoint()) {
1530 Node* out_c = unique_ctrl_out_or_null();
1531 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1532 // outer loop's safepoint could confuse removal of the outer loop.
1533 if (out_c != NULL && !out_c->is_OuterStripMinedLoopEnd()) {
1534 return in(TypeFunc::Control);
1535 }
1536 }
1537
1538 // Transforming long counted loops requires a safepoint node. Do not
1539 // eliminate a safepoint until loop opts are over.
1540 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1541 Node *n0 = in(0)->in(0);
1665 igvn->delete_precedence_of(igvn->C->root(), nb);
1666 }
1667 }
1668
1669 //============== SafePointScalarObjectNode ==============
1670
1671 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1672 #ifdef ASSERT
1673 Node* alloc,
1674 #endif
1675 uint first_index,
1676 uint n_fields) :
1677 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1678 _first_index(first_index),
1679 _n_fields(n_fields)
1680 #ifdef ASSERT
1681 , _alloc(alloc)
1682 #endif
1683 {
1684 #ifdef ASSERT
1685 if (alloc != NULL && !alloc->is_Allocate()
1686 && !(alloc->Opcode() == Op_VectorBox)) {
1687 alloc->dump();
1688 assert(false, "unexpected call node");
1689 }
1690 #endif
1691 init_class_id(Class_SafePointScalarObject);
1692 }
1693
1694 // Do not allow value-numbering for SafePointScalarObject node.
1695 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1696 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1697 return (&n == this); // Always fail except on self
1698 }
1699
1700 uint SafePointScalarObjectNode::ideal_reg() const {
1701 return 0; // No matching to machine instruction
1702 }
1703
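// Scalarized fields are debug-info uses only, so they may live anywhere a
// debug value is allowed to live.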
1704 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1705 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1723 new_node = true;
1724 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1725 sosn_map->Insert((void*)this, (void*)res);
1726 return res;
1727 }
1728
1729
1730 #ifndef PRODUCT
1731 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1732 st->print(" # fields@[%d..%d]", first_index(),
1733 first_index() + n_fields() - 1);
1734 }
1735
1736 #endif
1737
1738 //=============================================================================
1739 uint AllocateNode::size_of() const { return sizeof(*this); }
1740
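// AllocateNode is a macro node: it stays unexpanded through the main
// optimization phases so escape analysis can eliminate or scalar-replace the
// allocation, and it is expanded into the actual allocation code only during
// macro expansion.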
1741 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1742 Node *ctrl, Node *mem, Node *abio,
1743 Node *size, Node *klass_node,
1744 Node* initial_test,
1745 InlineTypeNode* inline_type_node)
1746 : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1747 {
1748 init_class_id(Class_Allocate);
1749 init_flags(Flag_is_macro);
1750 _is_scalar_replaceable = false;
1751 _is_non_escaping = false;
1752 _is_allocation_MemBar_redundant = false;
1753 _larval = false;
1754 Node *topnode = C->top();
1755
1756 init_req( TypeFunc::Control , ctrl );
1757 init_req( TypeFunc::I_O , abio );
1758 init_req( TypeFunc::Memory , mem );
1759 init_req( TypeFunc::ReturnAdr, topnode );
1760 init_req( TypeFunc::FramePtr , topnode );
1761 init_req( AllocSize , size);
1762 init_req( KlassNode , klass_node);
1763 init_req( InitialTest , initial_test);
1764 init_req( ALength , topnode);
1765 init_req( ValidLengthTest , topnode);
1766 init_req( InlineType , inline_type_node);
1767 // DefaultValue defaults to NULL
1768 // RawDefaultValue defaults to NULL
1769 C->add_macro_node(this);
1770 }
1771
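// The MemBar emitted when the constructor finishes is only needed to make
// the initialized fields visible before the new object can be published.
// If bytecode escape analysis of the initializer shows the receiver never
// escapes it, that barrier is redundant.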
1772 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1773 {
1774 assert(initializer != NULL &&
1775 initializer->is_object_constructor_or_class_initializer(),
1776 "unexpected initializer method");
1777 BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1778 if (analyzer == NULL) {
1779 return;
1780 }
1781
1782 // The allocated object is the first parameter (receiver) of its initializer
1783 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1784 _is_allocation_MemBar_redundant = true;
1785 }
1786 }
1787
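// Build the ideal subgraph computing the initial mark word for this
// allocation. With EnableValhalla the prototype header is loaded from the
// klass because it can differ per class; otherwise the compile-time constant
// prototype is used. The larval bit is OR'ed in for larval allocations.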
1788 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
1789 Node* mark_node = NULL;
1790 if (EnableValhalla) {
1791 Node* klass_node = in(AllocateNode::KlassNode);
1792 Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
1793 mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1794 } else {
1795 mark_node = phase->MakeConX(markWord::prototype().value());
1796 }
1797 mark_node = phase->transform(mark_node);
1798 // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
1799 return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
1800 }
1801
1802 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1803 // CastII, if appropriate. If we are not allowed to create new nodes, and
1804 // a CastII is appropriate, return NULL.
1805 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1806 Node *length = in(AllocateNode::ALength);
1807 assert(length != NULL, "length is not null");
1808
1809 const TypeInt* length_type = phase->find_int_type(length);
1810 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1811
1812 if (ary_type != NULL && length_type != NULL) {
1813 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1814 if (narrow_length_type != length_type) {
1815 // Assert one of:
1816 // - the narrow_length is 0
1817 // - the narrow_length is not wider than length
1818 assert(narrow_length_type == TypeInt::ZERO ||
1819 length_type->is_con() && narrow_length_type->is_con() &&
2158
2159 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
2160 st->print("%s", _kind_names[_kind]);
2161 }
2162 #endif
2163
2164 //=============================================================================
2165 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2166
2167 // perform any generic optimizations first (returns 'this' or NULL)
2168 Node *result = SafePointNode::Ideal(phase, can_reshape);
2169 if (result != NULL) return result;
2170 // Don't bother trying to transform a dead node
2171 if (in(0) && in(0)->is_top()) return NULL;
2172
2173 // Now see if we can optimize away this lock. We don't actually
2174 // remove the locking here, we simply set the _eliminate flag which
2175 // prevents macro expansion from expanding the lock. Since we don't
2176 // modify the graph, the value returned from this function is the
2177 // one computed above.
2178 const Type* obj_type = phase->type(obj_node());
2179 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2180 //
2181 // If we are locking a non-escaped object, the lock/unlock is unnecessary
2182 //
2183 ConnectionGraph *cgr = phase->C->congraph();
2184 if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2185 assert(!is_eliminated() || is_coarsened(), "sanity");
2186 // The lock could have been marked eliminated by the lock coarsening
2187 // code during the first IGVN before EA. Replace the coarsened flag
2188 // so that all associated locks/unlocks are eliminated.
2189 #ifdef ASSERT
2190 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2191 #endif
2192 this->set_non_esc_obj();
2193 return result;
2194 }
2195
2196 if (!phase->C->do_locks_coarsening()) {
2197 return result; // Compiling without locks coarsening
2198 }
2199 //
2355 }
2356
2357 //=============================================================================
2358 uint UnlockNode::size_of() const { return sizeof(*this); }
2359
2360 //=============================================================================
2361 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2362
2363 // perform any generic optimizations first (returns 'this' or NULL)
2364 Node *result = SafePointNode::Ideal(phase, can_reshape);
2365 if (result != NULL) return result;
2366 // Don't bother trying to transform a dead node
2367 if (in(0) && in(0)->is_top()) return NULL;
2368
2369 // Now see if we can optimize away this unlock. We don't actually
2370 // remove the unlocking here, we simply set the _eliminate flag which
2371 // prevents macro expansion from expanding the unlock. Since we don't
2372 // modify the graph, the value returned from this function is the
2373 // one computed above.
2374 // Escape state is defined after Parse phase.
2375 const Type* obj_type = phase->type(obj_node());
2376 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2377 //
2378 // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2379 //
2380 ConnectionGraph *cgr = phase->C->congraph();
2381 if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2382 assert(!is_eliminated() || is_coarsened(), "sanity");
2383 // The lock could have been marked eliminated by the lock coarsening
2384 // code during the first IGVN before EA. Replace the coarsened flag
2385 // so that all associated locks/unlocks are eliminated.
2386 #ifdef ASSERT
2387 this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2388 #endif
2389 this->set_non_esc_obj();
2390 }
2391 }
2392 return result;
2393 }
2394
2395 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2396 if (C == NULL) {
2436 }
2437 // unrelated
2438 return false;
2439 }
2440
2441 if (dest_t->isa_aryptr()) {
2442 // arraycopy or array clone
2443 if (t_oop->isa_instptr()) {
2444 return false;
2445 }
2446 if (!t_oop->isa_aryptr()) {
2447 return true;
2448 }
2449
2450 const Type* elem = dest_t->is_aryptr()->elem();
2451 if (elem == Type::BOTTOM) {
2452 // It's an array, but we don't know what the elements are
2453 return true;
2454 }
2455
2456 dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2457 t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2458 uint dest_alias = phase->C->get_alias_index(dest_t);
2459 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2460
2461 return dest_alias == t_oop_alias;
2462 }
2463
2464 return true;
2465 }
|