 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileLog.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
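// A note on the flattened signature (a sketch, not tied to any one platform):
// a Java signature (int, long) arrives here as the slot array
// { T_INT, T_LONG, T_VOID }, where the trailing T_VOID stands for the long's
// second half, and java_calling_convention() fills in one VMRegPair per slot
// with the register or stack slot the platform ABI assigns to it.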

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}
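// For example, a Start for a method taking (int, double) matches to the
// parameter projections Parm0 (int) and Parm1 (double), while the projection
// for the double's second half (Type::Half) becomes a TOP constant since it
// carries no value of its own.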

//------------------------------StartOSRNode----------------------------------
// The method start node for an on-stack-replacement (OSR) adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}
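// Roughly: the OSR entry takes a single raw-pointer argument, the OSR buffer,
// into which the interpreter packs the current locals and monitors; the
// OSR-specific part of the graph then unpacks them again.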

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != nullptr) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = Opcode() == Op_CallLeafVector
      ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
      : is_CallRuntime()
          ? match->c_return_value(ideal_reg)   // Calls into C runtime
          : match->  return_value(ideal_reg);  // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());

    if (Opcode() == Op_CallLeafVector) {
      // If the return value is a vector, compute the appropriate regmask,
      // taking the whole range into account
      if (ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
        if (OptoReg::is_valid(regs.second())) {
          for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
            rm.Insert(r);
          }
        }
      }
    }

    if (OptoReg::is_valid(regs.second())) {
      rm.Insert(regs.second());
    }
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
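    // For instance, an expanded stub such as jlong_disjoint_arraycopy(src, dest, count)
    // passes the destination as its second pointer argument; any non-pointer
    // arguments (e.g. the element count) are simply skipped by the scan below.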
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}
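// A typical use of result_cast() by callers (a sketch):
//
//   Node* cast = call->result_cast();
//   if (cast == call) {
//     // ambiguous or unexpected uses -- leave the call alone
//   } else if (cast != nullptr) {
//     // 'cast' is the unique CheckCastPP carrying the call's result
//   }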


void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
  projs->fallthrough_proj      = nullptr;
  projs->fallthrough_catchproj = nullptr;
  projs->fallthrough_ioproj    = nullptr;
  projs->catchall_ioproj       = nullptr;
  projs->catchall_catchproj    = nullptr;
  projs->fallthrough_memproj   = nullptr;
  projs->catchall_memproj      = nullptr;
  projs->resproj               = nullptr;
  projs->exobj                 = nullptr;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
    {
      // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
      projs->fallthrough_proj = pn;
      const Node* cn = pn->unique_ctrl_out_or_null();
      if (cn != nullptr && cn->is_Catch()) {
        ProjNode *cpn = nullptr;
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          cpn = cn->fast_out(k)->as_Proj();
          assert(cpn->is_CatchProj(), "must be a CatchProjNode");
          if (cpn->_con == CatchProjNode::fall_through_index)
            projs->fallthrough_catchproj = cpn;
          else {
            assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
            projs->catchall_catchproj = cpn;
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored,
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  assert(projs->fallthrough_proj != nullptr, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
  }
}
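// The projection shape being collected above, for a call with an exception
// edge, looks roughly like this:
//
//   Call
//    +-- Proj(Control) -- Catch -- CatchProj(fall-through)
//    |                        \--- CatchProj(catch-all)
//    +-- Proj(I_O)     (possibly a second, _is_io_use copy on the exception path)
//    +-- Proj(Memory)  (likewise possibly split)
//    +-- Proj(Parms)   (the result, if anyone uses it)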

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava() && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
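// E.g. if the new call takes two more fixed arguments than the old safepoint,
// jvms_adj is +2 and every debug-info offset (locals, expression stack,
// monitors, scalar objects) is shifted by the same amount so the copied debug
// edges line up with the new argument count.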

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineMH, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_mh_late_inline(), "not virtual");

    // Check whether this method handle call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    } else if (iid == vmIntrinsics::_linkToNative) {
      // never retry
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
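// E.g. if locals #2/#3 currently hold a long (the value in #2, top in #3 for
// its second half), storing an int into local #3 also writes top over #2,
// since the long's remaining half would otherwise look live in the debug info.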

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

//============== SafePointScalarObjectNode ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr && initializer->is_object_initializer(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}
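// Roughly: without compact object headers the initial mark word is a
// compile-time constant (markWord::prototype() -- unlocked, no hash), so no
// load is needed; with compact headers the prototype lives in the Klass
// (where it also carries the compressed klass bits), so it must be loaded
// from the Klass at runtime.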

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could already have been marked eliminated by lock coarsening
      // code during the first IGVN before EA. Replace the coarsened flag here
      // so that all associated locks/unlocks are eliminated.
#ifdef ASSERT
      this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could already have been marked eliminated by lock coarsening
      // code during the first IGVN before EA. Replace the coarsened flag here
      // so that all associated locks/unlocks are eliminated.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock)  const {
  if (C == nullptr) {
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }

    dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}
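// E.g. a copy into a long[] cannot modify memory seen through an int[] type:
// the two element types map to different alias indices, so the comparison
// above returns false and the copy is known not to interfere.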
//=============================================================================
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileLog.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      } else if (cik->is_flat_array_klass()) {
        ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
        cie->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        if (iklass != nullptr && iklass->is_inlinetype()) {
          Node* init_node = mcall->in(first_ind++);
          if (!init_node->is_top()) {
            st->print(" [is_init");
            format_helper(regalloc, st, init_node, ":", -1, nullptr);
          }
        }
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != nullptr) {
          st->print(" [");
          if (0 < (uint)iklass->nof_nonstatic_fields()) {
            cifield = iklass->nonstatic_field_at(0);
            cifield->print_name_on(st);
          } else {
            // Must be a null marker
            st->print("null marker");
          }
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            if (j < (uint)iklass->nof_nonstatic_fields()) {
              cifield = iklass->nonstatic_field_at(j);
              cifield->print_name_on(st);
            } else {
              // Must be a null marker
              st->print("null marker");
            }
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range_cc();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
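// A sketch of why the special case matters (assuming an inline type returned
// as two scattered int fields): the fields arrive in exactly the registers
// java_return_convention() assigned for the return, and the buffering stub
// consumes them in place, so it must be matched with the return convention
// rather than the ordinary argument convention.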


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple* range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (tf()->returns_inline_type_as_fields()) {
      // The call returns multiple values (inline type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    } else {
      if (con == TypeFunc::Parms) {
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = Opcode() == Op_CallLeafVector
          ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
          : match->c_return_value(ideal_reg);
        RegMask rm = RegMask(regs.first());

        if (Opcode() == Op_CallLeafVector) {
          // If the return value is a vector, compute the appropriate regmask,
          // taking the whole range into account
          if (ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
            if (OptoReg::is_valid(regs.second())) {
              for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
                rm.Insert(r);
              }
            }
          }
        }

        if (OptoReg::is_valid(regs.second())) {
          rm.Insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
      }
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}
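// A rough sketch of the multi-value case above: a call whose inline-type
// result is returned as scattered fields yields one MachProj per entry of
// range_cc() -- the buffered oop plus one per field -- each drawing its
// registers from the mask[] array supplied by the caller for the return
// convention.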

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain_sig();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node* n) {
  const TypeTuple* d = tf()->domain_cc();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    if (in(i) == n) {
      return true;
    }
  }
  return false;
}

bool CallNode::has_debug_use(Node* n) {
  if (jvms() != nullptr) {
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      if (in(i) == n) {
        return true;
      }
    }
  }
  return false;
}
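// Together, has_non_debug_use() and has_debug_use() let callers distinguish a
// value the call actually consumes as an argument from one that is merely
// kept alive in the JVMState (debug info) for deoptimization.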

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
  uint max_res = TypeFunc::Parms-1;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    max_res = MAX2(max_res, pn->_con);
  }

  assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");

  uint projs_size = sizeof(CallProjections);
  if (max_res > TypeFunc::Parms) {
    projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
  }
  char* projs_storage = resource_allocate_bytes(projs_size);
  CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
    {
      // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
      projs->fallthrough_proj = pn;
      const Node* cn = pn->unique_ctrl_out_or_null();
      if (cn != nullptr && cn->is_Catch()) {
        ProjNode *cpn = nullptr;
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          cpn = cn->fast_out(k)->as_Proj();
          assert(cpn->is_CatchProj(), "must be a CatchProjNode");
          if (cpn->_con == CatchProjNode::fall_through_index)
            projs->fallthrough_catchproj = cpn;
          else {
            assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
            projs->catchall_catchproj = cpn;
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj[0] = pn;
      break;
    default:
      assert(pn->_con <= max_res, "unexpected projection from allocation node.");
      projs->resproj[pn->_con-TypeFunc::Parms] = pn;
      break;
    }
  }

  // The resproj may not exist because the result could be ignored,
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_proj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
  }
  return projs;
}
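// Typical use (a sketch):
//
//   CallProjections* projs = call->extract_projections(true /*separate_io_proj*/, false);
//   if (projs->resproj[0] != nullptr) {
//     // use the primary result projection
//   }
//
// The struct is resource-allocated, so it is only valid until the enclosing
// ResourceMark is released.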

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava() && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain_sig()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
1093
1094 #ifdef ASSERT
1095 bool CallJavaNode::validate_symbolic_info() const {
1096 if (method() == nullptr) {
1097 return true; // call into runtime or uncommon trap
1098 }
1099 Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
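// With Valhalla, acmp bytecodes can be implemented with a call (the
// substitutability test), so there is no invoke at this bci whose
// symbolic info could be validated; skip the check.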
1100 if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
1101 return true;
1102 }
1103 ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1104 ciMethod* callee = method();
1105 if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1106 assert(override_symbolic_info(), "should be set");
1107 }
1108 assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1109 return true;
1110 }
1111 #endif
1112
1113 #ifndef PRODUCT
1114 void CallJavaNode::dump_spec(outputStream* st) const {
1115 if (_method) _method->print_short_name(st);
1116 CallNode::dump_spec(st);
1117 }
1118
1119 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1120 if (_method) {
1121 _method->print_short_name(st);
1122 } else {
1123 st->print("<?>");
1124 }
1125 }
1126 #endif
1127
1128 //=============================================================================
1129 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1130 bool CallStaticJavaNode::cmp( const Node &n ) const {
1131 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1132 return CallJavaNode::cmp(call);
1133 }
1134
1135 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1136 if (can_reshape && uncommon_trap_request() != 0) {
1137 PhaseIterGVN* igvn = phase->is_IterGVN();
1138 if (remove_unknown_flat_array_load(igvn, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
1139 if (!in(0)->is_Region()) {
1140 igvn->replace_input_of(this, 0, phase->C->top());
1141 }
1142 return this;
1143 }
1144 }
1145
1146 CallGenerator* cg = generator();
1147 if (can_reshape && cg != nullptr) {
1148 assert(IncrementalInlineMH, "required");
1149 assert(cg->call_node() == this, "mismatch");
1150 assert(cg->is_mh_late_inline(), "not virtual");
1151
1152 // Check whether this method handle (MH) call becomes a candidate for inlining.
1153 ciMethod* callee = cg->method();
1154 vmIntrinsics::ID iid = callee->intrinsic_id();
1155 if (iid == vmIntrinsics::_invokeBasic) {
1156 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1157 phase->C->prepend_late_inline(cg);
1158 set_generator(nullptr);
1159 }
1160 } else if (iid == vmIntrinsics::_linkToNative) {
1161 // never retry
1162 } else {
1163 assert(callee->has_member_arg(), "wrong type of call?");
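// For the linkTo* intrinsics the MemberName is passed as the trailing
// argument; once it becomes a constant the actual call target is known
// and late inlining can be attempted.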
1164 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1165 phase->C->prepend_late_inline(cg);
1178
1179 //----------------------------uncommon_trap_request----------------------------
1180 // If this is an uncommon trap, return the request code, else zero.
1181 int CallStaticJavaNode::uncommon_trap_request() const {
1182 return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
1183 }
1184 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1185 #ifndef PRODUCT
1186 if (!(call->req() > TypeFunc::Parms &&
1187 call->in(TypeFunc::Parms) != nullptr &&
1188 call->in(TypeFunc::Parms)->is_Con() &&
1189 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1190 assert(in_dump() != 0, "OK if dumping");
1191 tty->print("[bad uncommon trap]");
1192 return 0;
1193 }
1194 #endif
1195 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1196 }
1197
1198 // The split-if optimization can cause the flat array branch of an array load with unknown type (see
1199 // Parse::array_load) to end in an uncommon trap. In that case, the call to
1200 // 'load_unknown_inline' is useless. Replace it with an uncommon trap that has the same JVMState.
1201 bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
1202 if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
1203 return false;
1204 }
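// Control is a Region: check each incoming path separately, cloning the
// memory state and replacing Phis controlled by the Region with the
// memory input corresponding to that path.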
1205 if (ctl->is_Region()) {
1206 bool res = false;
1207 for (uint i = 1; i < ctl->req(); i++) {
1208 MergeMemNode* mm = mem->clone()->as_MergeMem();
1209 for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
1210 Node* m = mms.memory();
1211 if (m->is_Phi() && m->in(0) == ctl) {
1212 mms.set_memory(m->in(i));
1213 }
1214 }
1215 if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
1216 res = true;
1217 if (!ctl->in(i)->is_Region()) {
1218 igvn->replace_input_of(ctl, i, igvn->C->top());
1219 }
1220 }
1221 igvn->remove_dead_node(mm);
1222 }
1223 return res;
1224 }
1225 // Verify the control flow: walk up the chain of projections, catches, and membars; it must end at a load_unknown_inline call preceded by a membar.
1226 Node* call = ctl;
1227 MemBarNode* membar = nullptr;
1228 for (;;) {
1229 if (call == nullptr || call->is_top()) {
1230 return false;
1231 }
1232 if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
1233 call = call->in(0);
1234 } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
1235 call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1236 assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
1237 membar = call->in(0)->in(0)->as_MemBar();
1238 break;
1239 } else {
1240 return false;
1241 }
1242 }
1243
1244 JVMState* jvms = call->jvms();
1245 if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
1246 return false;
1247 }
1248
1249 Node* call_mem = call->in(TypeFunc::Memory);
1250 if (call_mem == nullptr || call_mem->is_top()) {
1251 return false;
1252 }
1253 if (!call_mem->is_MergeMem()) {
1254 call_mem = MergeMemNode::make(call_mem);
1255 igvn->register_new_node_with_optimizer(call_mem);
1256 }
1257
1258 // Verify that there's no unexpected side effect
1259 for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
1260 Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
1261 Node* m2 = mms2.memory2();
1262
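// Walk m1 up through projections, membars, and merges toward m2 or the
// call itself; the fixed 100-step cap bounds the walk.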
1263 for (uint i = 0; i < 100; i++) {
1264 if (m1 == m2) {
1265 break;
1266 } else if (m1->is_Proj()) {
1267 m1 = m1->in(0);
1268 } else if (m1->is_MemBar()) {
1269 m1 = m1->in(TypeFunc::Memory);
1270 } else if (m1->Opcode() == Op_CallStaticJava &&
1271 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1272 if (m1 != call) {
1273 return false;
1274 }
1275 break;
1276 } else if (m1->is_MergeMem()) {
1277 MergeMemNode* mm = m1->as_MergeMem();
1278 int idx = mms2.alias_idx();
1279 if (idx == Compile::AliasIdxBot) {
1280 m1 = mm->base_memory();
1281 } else {
1282 m1 = mm->memory_at(idx);
1283 }
1284 } else {
1285 return false;
1286 }
1287 }
1288 }
1289 if (call_mem->outcnt() == 0) {
1290 igvn->remove_dead_node(call_mem);
1291 }
1292
1293 // Remove membar preceding the call
1294 membar->remove(igvn);
1295
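// Build an uncommon trap call wired with the same state inputs as the
// load_unknown_inline call it replaces.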
1296 address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
1297 CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
1298 unc->init_req(TypeFunc::Control, call->in(0));
1299 unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
1300 unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
1301 unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
1302 unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
1303 unc->init_req(TypeFunc::Parms+0, unc_arg);
1304 unc->set_cnt(PROB_UNLIKELY_MAG(4));
1305 unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
1306
1307 // Replace the call with an uncommon trap
1308 igvn->replace_input_of(call, 0, igvn->C->top());
1309
1310 igvn->register_new_node_with_optimizer(unc);
1311
1312 Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
1313 Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
1314 igvn->add_input_to(igvn->C->root(), halt);
1315
1316 return true;
1317 }
1318
1319
1320 #ifndef PRODUCT
1321 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1322 st->print("# Static ");
1323 if (_name != nullptr) {
1324 st->print("%s", _name);
1325 int trap_req = uncommon_trap_request();
1326 if (trap_req != 0) {
1327 char buf[100];
1328 st->print("(%s)",
1329 Deoptimization::format_trap_request(buf, sizeof(buf),
1330 trap_req));
1331 }
1332 st->print(" ");
1333 }
1334 CallJavaNode::dump_spec(st);
1335 }
1336
1337 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1338 if (_method) {
1339 _method->print_short_name(st);
1404 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1405 bool CallRuntimeNode::cmp( const Node &n ) const {
1406 CallRuntimeNode &call = (CallRuntimeNode&)n;
1407 return CallNode::cmp(call) && !strcmp(_name,call._name);
1408 }
1409 #ifndef PRODUCT
1410 void CallRuntimeNode::dump_spec(outputStream *st) const {
1411 st->print("# ");
1412 st->print("%s", _name);
1413 CallNode::dump_spec(st);
1414 }
1415 #endif
1416 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1417 bool CallLeafVectorNode::cmp( const Node &n ) const {
1418 CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1419 return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1420 }
1421
1422 //------------------------------calling_convention-----------------------------
1423 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1424 if (_entry_point == nullptr) {
1425 // The call to such a stub is a special case: its inputs are
1426 // multiple values returned from a call, so it should follow
1427 // the return convention.
1428 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1429 return;
1430 }
1431 SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1432 }
1433
1434 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1435 #ifdef ASSERT
1436 assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1437 "return vector size must match");
1438 const TypeTuple* d = tf()->domain_sig();
1439 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1440 Node* arg = in(i);
1441 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1442 "vector argument size must match");
1443 }
1444 #endif
1445
1446 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1447 }
1448
1449 //=============================================================================
1454 #ifndef PRODUCT
1455 void CallLeafNode::dump_spec(outputStream *st) const {
1456 st->print("# ");
1457 st->print("%s", _name);
1458 CallNode::dump_spec(st);
1459 }
1460 #endif
1461
1462 uint CallLeafNoFPNode::match_edge(uint idx) const {
1463 // Null entry point is a special case for which the target is in a
1464 // register. Need to match that edge.
1465 return entry_point() == nullptr && idx == TypeFunc::Parms;
1466 }
1467
1468 //=============================================================================
1469
1470 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1471 assert(verify_jvms(jvms), "jvms must match");
1472 int loc = jvms->locoff() + idx;
1473 if (in(loc)->is_top() && idx > 0 && !c->is_top()) {
1474 // If the current local at idx is top, then local idx - 1 could
1475 // be a long/double that needs to be killed, since the top could
1476 // represent the 2nd half of that long/double.
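// E.g., a long in locals (i-1, i) has its value at i-1 and top at i:
// storing a new value at i must also kill the long at i-1.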
1477 uint ideal = in(loc -1)->ideal_reg();
1478 if (ideal == Op_RegD || ideal == Op_RegL) {
1479 // set other (low index) half to top
1480 set_req(loc - 1, in(loc));
1481 }
1482 }
1483 set_req(loc, c);
1484 }
1485
1486 uint SafePointNode::size_of() const { return sizeof(*this); }
1487 bool SafePointNode::cmp( const Node &n ) const {
1498 }
1499 }
1500
1501
1502 //----------------------------next_exception-----------------------------------
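// Exception states are linked through the safepoint's extra precedence
// edge: len() == req() means no chained exception state exists.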
1503 SafePointNode* SafePointNode::next_exception() const {
1504 if (len() == req()) {
1505 return nullptr;
1506 } else {
1507 Node* n = in(req());
1508 assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1509 return (SafePointNode*) n;
1510 }
1511 }
1512
1513
1514 //------------------------------Ideal------------------------------------------
1515 // Skip over any collapsed Regions
1516 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1517 assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1518 if (remove_dead_region(phase, can_reshape)) {
1519 return this;
1520 }
1521 // Scalarize inline types in safepoint debug info.
1522 // Delay this until all inlining is over to avoid getting inconsistent debug info.
1523 if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
1524 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
1525 Node* n = in(i)->uncast();
1526 if (n->is_InlineType()) {
1527 n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
1528 }
1529 }
1530 }
1531 return nullptr;
1532 }
1533
1534 //------------------------------Identity---------------------------------------
1535 // Remove obviously duplicate safepoints
1536 Node* SafePointNode::Identity(PhaseGVN* phase) {
1537
1538 // If you have back to back safepoints, remove one
1539 if (in(TypeFunc::Control)->is_SafePoint()) {
1540 Node* out_c = unique_ctrl_out_or_null();
1541 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1542 // outer loop's safepoint could confuse removal of the outer loop.
1543 if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1544 return in(TypeFunc::Control);
1545 }
1546 }
1547
1548 // Transforming long counted loops requires a safepoint node. Do not
1549 // eliminate a safepoint until loop opts are over.
1550 if (in(0)->is_Proj() && !phase->C->major_progress()) {
1551 Node *n0 = in(0)->in(0);
1669 }
1670
1671 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1672 assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1673 int nb = igvn->C->root()->find_prec_edge(this);
1674 if (nb != -1) {
1675 igvn->delete_precedence_of(igvn->C->root(), nb);
1676 }
1677 }
1678
1679 //============== SafePointScalarObjectNode ==============
1680
1681 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
1682 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1683 _first_index(first_index),
1684 _depth(depth),
1685 _n_fields(n_fields),
1686 _alloc(alloc)
1687 {
1688 #ifdef ASSERT
1689 if (alloc != nullptr && !alloc->is_Allocate() && alloc->Opcode() != Op_VectorBox) {
1690 alloc->dump();
1691 assert(false, "unexpected call node");
1692 }
1693 #endif
1694 init_class_id(Class_SafePointScalarObject);
1695 }
1696
1697 // Do not allow value-numbering for SafePointScalarObject node.
1698 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1699 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1700 return (&n == this); // Always fail except on self
1701 }
1702
1703 uint SafePointScalarObjectNode::ideal_reg() const {
1704 return 0; // No matching to machine instruction
1705 }
1706
1707 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1708 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1709 }
1774 new_node = false;
1775 return (SafePointScalarMergeNode*)cached;
1776 }
1777 new_node = true;
1778 SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1779 sosn_map->Insert((void*)this, (void*)res);
1780 return res;
1781 }
1782
1783 #ifndef PRODUCT
1784 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
1785 st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
1786 }
1787 #endif
1788
1789 //=============================================================================
1790 uint AllocateNode::size_of() const { return sizeof(*this); }
1791
1792 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1793 Node *ctrl, Node *mem, Node *abio,
1794 Node *size, Node *klass_node,
1795 Node* initial_test,
1796 InlineTypeNode* inline_type_node)
1797 : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
1798 {
1799 init_class_id(Class_Allocate);
1800 init_flags(Flag_is_macro);
1801 _is_scalar_replaceable = false;
1802 _is_non_escaping = false;
1803 _is_allocation_MemBar_redundant = false;
1804 _larval = false;
1805 Node *topnode = C->top();
1806
1807 init_req( TypeFunc::Control , ctrl );
1808 init_req( TypeFunc::I_O , abio );
1809 init_req( TypeFunc::Memory , mem );
1810 init_req( TypeFunc::ReturnAdr, topnode );
1811 init_req( TypeFunc::FramePtr , topnode );
1812 init_req( AllocSize , size);
1813 init_req( KlassNode , klass_node);
1814 init_req( InitialTest , initial_test);
1815 init_req( ALength , topnode);
1816 init_req( ValidLengthTest , topnode);
1817 init_req( InlineType , inline_type_node);
1818 // DefaultValue defaults to nullptr
1819 // RawDefaultValue defaults to nullptr
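// Allocations are macro nodes: they stay in this high-level form until
// macro expansion lowers them to fast/slow-path allocation code.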
1820 C->add_macro_node(this);
1821 }
1822
1823 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1824 {
1825 assert(initializer != nullptr &&
1826 (initializer->is_object_constructor() || initializer->is_class_initializer()),
1827 "unexpected initializer method");
1828 BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1829 if (analyzer == nullptr) {
1830 return;
1831 }
1832
1833 // The allocated object is passed as the first parameter ('this') to its initializer
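// If 'this' does not escape the initializer, no other thread can observe
// the object while it is being initialized, which is what makes the
// allocation's trailing MemBar redundant (see the flag set below).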
1834 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1835 _is_allocation_MemBar_redundant = true;
1836 }
1837 }
1838
1839 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
1840 Node* mark_node = nullptr;
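// With compact object headers or Valhalla, the mark word prototype is
// klass-specific, so it must be loaded from the Klass rather than taken
// as a compile-time constant.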
1841 if (UseCompactObjectHeaders || EnableValhalla) {
1842 Node* klass_node = in(AllocateNode::KlassNode);
1843 Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
1844 mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1845 if (EnableValhalla) {
1846 mark_node = phase->transform(mark_node);
1847 // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
1848 mark_node = new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
1849 }
1850 return mark_node;
1851 } else {
1852 return phase->MakeConX(markWord::prototype().value());
1853 }
1854 }
1855
1856 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1857 // CastII, if appropriate. If we are not allowed to create new nodes, and
1858 // a CastII is appropriate, return null.
1859 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
1860 Node *length = in(AllocateNode::ALength);
1861 assert(length != nullptr, "length is not null");
1862
1863 const TypeInt* length_type = phase->find_int_type(length);
1864 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1865
1866 if (ary_type != nullptr && length_type != nullptr) {
1867 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1868 if (narrow_length_type != length_type) {
1869 // Assert one of:
1870 // - the narrow_length is 0
1871 // - the narrow_length is not wider than length
1872 assert(narrow_length_type == TypeInt::ZERO ||
1873 (length_type->is_con() && narrow_length_type->is_con() &&
2229
2230 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
2231 st->print("%s", _kind_names[_kind]);
2232 }
2233 #endif
2234
2235 //=============================================================================
2236 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2237
2238 // perform any generic optimizations first (returns 'this' or null)
2239 Node *result = SafePointNode::Ideal(phase, can_reshape);
2240 if (result != nullptr) return result;
2241 // Don't bother trying to transform a dead node
2242 if (in(0) && in(0)->is_top()) return nullptr;
2243
2244 // Now see if we can optimize away this lock. We don't actually
2245 // remove the locking here, we simply set the _eliminate flag which
2246 // prevents macro expansion from expanding the lock. Since we don't
2247 // modify the graph, the value returned from this function is the
2248 // one computed above.
2249 const Type* obj_type = phase->type(obj_node());
2250 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2251 //
2252 // If we are locking a non-escaped object, the lock/unlock is unnecessary
2253 //
2254 ConnectionGraph *cgr = phase->C->congraph();
2255 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2256 assert(!is_eliminated() || is_coarsened(), "sanity");
2257 // The lock could have been marked eliminated by the lock coarsening
2258 // code during the first IGVN pass, before EA. Replace the coarsened
2259 // flag so that all associated locks/unlocks are eliminated.
2260 #ifdef ASSERT
2261 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2262 #endif
2263 this->set_non_esc_obj();
2264 return result;
2265 }
2266
2267 if (!phase->C->do_locks_coarsening()) {
2268 return result; // Compiling without locks coarsening
2269 }
2270 //
2431 }
2432
2433 //=============================================================================
2434 uint UnlockNode::size_of() const { return sizeof(*this); }
2435
2436 //=============================================================================
2437 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2438
2439 // perform any generic optimizations first (returns 'this' or null)
2440 Node *result = SafePointNode::Ideal(phase, can_reshape);
2441 if (result != nullptr) return result;
2442 // Don't bother trying to transform a dead node
2443 if (in(0) && in(0)->is_top()) return nullptr;
2444
2445 // Now see if we can optimize away this unlock. We don't actually
2446 // remove the unlocking here, we simply set the _eliminate flag which
2447 // prevents macro expansion from expanding the unlock. Since we don't
2448 // modify the graph, the value returned from this function is the
2449 // one computed above.
2450 // Escape state is defined after Parse phase.
2451 const Type* obj_type = phase->type(obj_node());
2452 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2453 //
2454 // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2455 //
2456 ConnectionGraph *cgr = phase->C->congraph();
2457 if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2458 assert(!is_eliminated() || is_coarsened(), "sanity");
2459 // The lock could have been marked eliminated by the lock coarsening
2460 // code during the first IGVN pass, before EA. Replace the coarsened
2461 // flag so that all associated locks/unlocks are eliminated.
2462 #ifdef ASSERT
2463 this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2464 #endif
2465 this->set_non_esc_obj();
2466 }
2467 }
2468 return result;
2469 }
2470
2471 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
2472 if (C == nullptr) {
2512 }
2513 // unrelated
2514 return false;
2515 }
2516
2517 if (dest_t->isa_aryptr()) {
2518 // arraycopy or array clone
2519 if (t_oop->isa_instptr()) {
2520 return false;
2521 }
2522 if (!t_oop->isa_aryptr()) {
2523 return true;
2524 }
2525
2526 const Type* elem = dest_t->is_aryptr()->elem();
2527 if (elem == Type::BOTTOM) {
2528 // An array, but we don't know what the elements are
2529 return true;
2530 }
2531
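// Widen both array types across all offsets and compare alias indices:
// the copy can only modify memory in the destination's alias class.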
2532 dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2533 t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2534 uint dest_alias = phase->C->get_alias_index(dest_t);
2535 uint t_oop_alias = phase->C->get_alias_index(t_oop);
2536
2537 return dest_alias == t_oop_alias;
2538 }
2539
2540 return true;
2541 }
|