 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/vmError.hpp"
// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.
const Type* RegionNode::Value(PhaseGVN* phase) const {

// ...
  Node *n = (Node*)phase->C->root();
  nstack.push(n);
  visited.set(n->_idx);
  while (nstack.size() != 0) {
    n = nstack.pop();
    uint max = n->outcnt();
    for (uint i = 0; i < max; i++) {
      Node* m = n->raw_out(i);
      if (m != NULL && m->is_CFG()) {
        if (m == this) {
          return false; // We reached the Region node - it is not dead.
        }
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return true; // The Region node is unreachable - it is dead.
}

bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  // cmpP with 1 top input
  //           |
  //          If
  //         /  \
  //   IfFalse  IfTrue  /- Some Node
  //         \  /      /    /
  //        Region    / /-MergeMem
  //             \---Phi
  //
  //
  // PhaseStringOpts expects the Region to go away and be replaced by the If's
  // control input, but because there's still a Phi, the Region stays in the
  // graph. The top input from the cmpP is propagated forward, and a useful
  // subgraph goes away. The code below replaces the Phi with the MergeMem so
  // that the Region is simplified.

  PhiNode* phi = has_unique_phi();
  if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(phi->req() == 3, "same as region");
    for (uint i = 1; i < 3; ++i) {
      Node *mem = phi->in(i);
      if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i;
        Node* other = phi->in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          phase->is_IterGVN()->replace_node(phi, m);
          return true;
        }
      }
    }
  }
  return false;
}
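
// For intuition, a sketch of the shape being cleaned (assuming path 2 holds
// the MergeMem): Phi(Region, MergeMem(base, ...), base) is replaced by the
// MergeMem itself, since the MergeMem already flows from 'base'; the now
// phi-less diamond can then collapse via the logic in RegionNode::Ideal below.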

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return NULL;  // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for a RegionNode with no Phi users whose inputs come from both
  // arms of the same If.  If found, the control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    has_phis = (has_phi() != NULL);       // Cache result
    if (has_phis && try_clean_mem_phi(phase)) {
      has_phis = false;
    }

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
        for( uint j=i+1; j<req(); j++ ) {
          if( in(j) && in(j)->in(0) == iff &&
              if1->Opcode() != in(j)->Opcode() ) {
            // Add the IF Projections to the worklist. They (and the IF itself)
            // will be eliminated if dead.
            phase->is_IterGVN()->add_users_to_worklist(iff);
            set_req(i, iff->in(0));// Skip around the useless IF diamond
            set_req(j, NULL);
            return this;      // Record progress
          }
        }
      }
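
      // For intuition, a sketch (assuming a two-input Region with no phis):
      //
      //     ctrl                    ctrl
      //      |                       |
      //      If           ==>      Region   (the If and its two projections
      //     /  \                             are bypassed and die)
      //  IfTrue IfFalse
      //     \   /
      //    Region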

// ...

  if (iff1 == iff2) {
    igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
    igvn->replace_input_of(region, idx1, iff1->in(0));
    igvn->replace_input_of(region, idx2, igvn->C->top());
    return (region == this); // Remove useless if (both projections map to the same control/value)
  }
  BoolNode* bol1 = iff1->in(1)->isa_Bool();
  BoolNode* bol2 = iff2->in(1)->isa_Bool();
  if (bol1 == NULL || bol2 == NULL) {
    return false; // No bool inputs found
  }
  Node* cmp1 = bol1->in(1);
  Node* cmp2 = bol2->in(1);
  bool commute = false;
  if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
    return false; // No comparison
  } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
             cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
             cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
             cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
             cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
    // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
    // SubTypeCheck is not commutative
    return false;
  } else if (cmp1 != cmp2) {
    if (cmp1->in(1) == cmp2->in(2) &&
        cmp1->in(2) == cmp2->in(1)) {
      commute = true; // Same but swapped inputs, commute the test
    } else {
      return false; // Ifs are not comparing the same values
    }
  }
  proj1 = proj1->other_if_proj();
  proj2 = proj2->other_if_proj();
  if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
         proj2->unique_ctrl_out_or_null() == this) ||
        (proj2->unique_ctrl_out_or_null() == iff1 &&
         proj1->unique_ctrl_out_or_null() == this))) {
    return false; // Ifs are not connected through other projs
  }
  // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
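
  // For intuition, a sketch of the detected shape: the two Ifs test the same
  // comparison (possibly with commuted inputs), one If's "other" projection
  // feeds the second If, and the second If's "other" projection feeds this
  // Region, i.e.
  //
  //   iff1 -> proj -> iff2 -> proj -> this
  //
  // with the remaining projections already merged by this Region, so one of
  // the two tests is redundant.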

// ...

//=============================================================================
// note that these functions assume that the _adr_type field is flattened
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
bool PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == NULL || at == TypePtr::BOTTOM) return at;
  return Compile::current()->alias_type(at)->adr_type();
}
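
// For intuition (a sketch): flattening maps a precise address type to the
// canonical adr_type of its alias class, e.g. a TypeAryPtr for int[] at a
// particular offset flattens to the one canonical int[] element slice.
// Memory phis keyed by flattened types therefore hash/compare equal whenever
// they name the same alias class.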

//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != NULL)
      p->init_req(j, x);
  }
  return p;
}
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}
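
// Typical use (a sketch; 'r' is a Region, 'mem' the current memory state):
//
//   PhiNode* phi = PhiNode::make(r, mem);  // every existing path starts at 'mem'
//   phi->set_req(2, other_mem);            // then differentiate one path
//
// make_blank() instead derives only the type/adr_type from 'x' and leaves the
// data inputs NULL for callers that fill them in afterwards.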

// ...

      np->as_Phi()->verify_adr_type(visited, at);
    } else if (n->bottom_type() == Type::TOP
               || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
      // ignore top inputs
    } else {
      const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
      // recheck phi/non-phi consistency at leaves:
      assert((nat != NULL) == (at != NULL), "");
      assert(nat == at || nat == TypePtr::BOTTOM,
             "adr_type must be consistent at leaves of phi nest");
    }
  }
}

// Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing

  assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");

  if (!VerifyAliases)   return;  // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);              // RegionNode
  if( !r )                      // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;

// ...

        }
      }
    } else if (l->in(LoopNode::LoopBackControl) != NULL &&
               in(LoopNode::EntryControl) != NULL &&
               phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
      // During CCP, if we saturate the type of a counted loop's Phi
      // before the special code for counted loops above has a chance
      // to run (that is, as long as the type of the backedge's control
      // is top), we might end up with non-monotonic types.
      return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
    }
  }

  // Until we have harmony between classes and interfaces in the type
  // lattice, we must tread carefully around phis which implicitly
  // convert the one to the other.
  const TypePtr* ttp = _type->make_ptr();
  const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
  const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
  bool is_intf = false;
  if (ttip != NULL) {
    ciKlass* k = ttip->klass();
    if (k->is_loaded() && k->is_interface())
      is_intf = true;
  }
  if (ttkp != NULL) {
    ciKlass* k = ttkp->klass();
    if (k->is_loaded() && k->is_interface())
      is_intf = true;
  }
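
  // For intuition, a Java-level sketch of the hazard:
  //
  //   Comparable c = flag ? new A() : new B();  // A, B both implement Comparable
  //
  // The meet of A and B can be j/l/Object, which does not implement Comparable,
  // so naively meeting the inputs' class types with the Phi's declared
  // interface type would produce an inconsistent (non-monotonic) answer.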

  // Default case: merge all inputs
  const Type *t = Type::TOP;        // Merged type starting value
  for (uint i = 1; i < req(); ++i) {// For all paths in
    // Reachable control path?
    if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
      const Type* ti = phase->type(in(i));
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface.  This might not be true of the meet
      // of all the input types.  The lattice is not distributive in
      // such cases.  Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
      const TypePtr* tip = ti->make_ptr();
      const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;

// ...

  // (Occurrences of this case suggest improvements to Value methods.)
  //
  // It is not possible to see Type::BOTTOM values as phi inputs,
  // because the ciTypeFlow pre-pass produces verifier-quality types.
  const Type* ft = t->filter_speculative(_type);  // Worst case type

#ifdef ASSERT
  // The following logic has been moved into TypeOopPtr::filter.
  const Type* jt = t->join_speculative(_type);
  if (jt->empty()) {           // Emptied out???

    // Check for evil case of 't' being a class and '_type' expecting an
    // interface.  This can happen because the bytecodes do not contain
    // enough type info to distinguish a Java-level interface variable
    // from a Java-level object variable.  If we meet 2 classes which
    // both implement interface I, but their meet is at 'j/l/O' which
    // doesn't implement I, we have no way to tell if the result should
    // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
    // into a Phi which "knows" it's an Interface type we'll have to
    // uplift the type.
    if (!t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface()) {
      assert(ft == _type, "");    // Uplift to interface
    } else if (!t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
      assert(ft == _type, "");    // Uplift to interface
    } else {
      // We also have to handle 'evil cases' of interface- vs. class-arrays
      Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
      if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
        assert(ft == _type, "");   // Uplift to array of interface
      } else {
        // Otherwise it's something stupid like non-overlapping int ranges
        // found on dying counted loops.
        assert(ft == Type::TOP, ""); // Canonical empty value
      }
    }
  }

  else {

    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class.  However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out.  This isn't possible but happens

// ...

Node* PhiNode::Identity(PhaseGVN* phase) {
  // Check for no merging going on
  // (There used to be special-case code here when this->region->is_Loop.
  // It would check for a tributary phi on the backedge that fed the main phi
  // trivially, perhaps with a single cast.  The unique_input method
  // does all this and more, by reducing such tributaries to 'this'.)
  Node* uin = unique_input(phase, false);
  if (uin != NULL) {
    return uin;
  }

  int true_path = is_diamond_phi();
  // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
  if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
    Node* id = is_cmove_id(phase, true_path);
    if (id != NULL) {
      return id;
    }
  }
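
  // For intuition, a CMove-identity sketch at the Java level:
  //
  //   int m = (a == b) ? a : b;  // Phi(Region, a, b) under an a==b test
  //
  // Both paths yield the same value (b), so the Phi is an identity and the
  // diamond can go away.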

  // Looking for phis with identical inputs.  If we find one that has
  // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
  if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
      TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
    uint phi_len = req();
    Node* phi_reg = region();
    for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
      Node* u = phi_reg->fast_out(i);
      if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
          u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
          u->req() == phi_len) {
        for (uint j = 1; j < phi_len; j++) {
          if (in(j) != u->in(j)) {
            u = NULL;
            break;
          }
        }
        if (u != NULL) {
          return u;
        }

// ...

        } else if (rc->in(0)->in(1) != NULL &&
                   rc->in(0)->in(1)->is_Bool()) {
          if (worklist.member(rc->in(0)->in(1))) {
            delay = true;
          } else if (rc->in(0)->in(1)->in(1) != NULL &&
                     rc->in(0)->in(1)->in(1)->is_Cmp()) {
            if (worklist.member(rc->in(0)->in(1)->in(1))) {
              delay = true;
            }
          }
        }
      }
    }
  }
  if (delay) {
    worklist.push(this);
  }
  return delay;
}
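
// Rationale (a sketch): with a diamond like
//
//      If(Bool(Cmp(a, b)))
//       /              \
//   IfTrue            IfFalse
//       \              /
//        Region -- Phi(x, y)
//
// if the If, its Bool, or its Cmp is still on the IGVN worklist, the region
// may yet lose a path; delaying the CMove identity above avoids folding a
// shape that is about to change.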

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node *r = in(0);              // RegionNode
  assert(r != NULL && r->is_Region(), "this phi must have a region");
  assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP ) // Dead code?
    return NULL;                // No change

  Node *top = phase->C->top();
  bool new_phi = (outcnt() == 0); // transforming new Phi
  // No change for igvn if new phi is not hooked
  if (new_phi && can_reshape)
    return NULL;

  // There are 2 situations when only one valid phi's input is left

// ...

          for (uint i = 1; i < req(); i++) {
            offset->init_req(i, in(i)->in(AddPNode::Offset));
          }
          phase->is_IterGVN()->register_new_node_with_optimizer(offset);
        }
        return new AddPNode(base, address, offset);
      }
    }
  }

  // Split phis through memory merges, so that the memory merges will go away.
  // Piggy-back this transformation on the search for a unique input....
  // It will be as if the merged memory is the unique value of the phi.
  // (Do not attempt this optimization unless parsing is complete.
  // It would make the parser's memory-merge logic sick.)
  // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
  if (progress == NULL && can_reshape && type() == Type::MEMORY) {
    // see if this phi should be sliced
    uint merge_width = 0;
    bool saw_self = false;
    for( uint i=1; i<req(); ++i ) {// For all paths in
      Node *ii = in(i);
      // TOP inputs should not be counted as safe inputs because if the
      // Phi references itself through all other inputs then splitting the
      // Phi through memory merges would create a dead loop at a later stage.
      if (ii == top) {
        return NULL; // Delay optimization until graph is cleaned.
      }
      if (ii->is_MergeMem()) {
        MergeMemNode* n = ii->as_MergeMem();
        merge_width = MAX2(merge_width, n->req());
        saw_self = saw_self || (n->base_memory() == this);
      }
    }

    // This restriction is temporarily necessary to ensure termination:
    if (!saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;

    if (merge_width > Compile::AliasIdxRaw) {
      // found at least one non-empty MergeMem
      const TypePtr* at = adr_type();
      if (at != TypePtr::BOTTOM) {
        // Patch the existing phi to select an input from the merge:
        // Phi:AT1(...MergeMem(m0, m1, m2)...) into
        //     Phi:AT1(...m1...)
        int alias_idx = phase->C->get_alias_index(at);
        for (uint i=1; i<req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            // compress paths and change unreachable cycles to TOP
            // If not, we can update the input infinitely along a MergeMem cycle
            // Equivalent code is in MemNode::Ideal_common
            Node *m = phase->transform(n);
            if (outcnt() == 0) {  // Above transform() may kill us!
              return top;
            }

// ...

        if (!saw_safe_input) {
          // There is a dead loop: All inputs are either dead or reference back to this phi
          return top;
        }

        // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
        //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
        PhaseIterGVN* igvn = phase->is_IterGVN();
        assert(igvn != NULL, "sanity check");
        Node* hook = new Node(1);
        PhiNode* new_base = (PhiNode*) clone();
        // Must eagerly register phis, since they participate in loops.
        igvn->register_new_node_with_optimizer(new_base);
        hook->add_req(new_base);

        MergeMemNode* result = MergeMemNode::make(new_base);
        for (uint i = 1; i < req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
              // If we have not seen this slice yet, make a phi for it.
              bool made_new_phi = false;
              if (mms.is_empty()) {
                Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
                made_new_phi = true;
                igvn->register_new_node_with_optimizer(new_phi);
                hook->add_req(new_phi);
                mms.set_memory(new_phi);
              }
              Node* phi = mms.memory();
              assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
              phi->set_req(i, mms.memory2());
            }
          }
        }
        // Distribute all self-loops.
        { // (Extra braces to hide mms.)
          for (MergeMemStream mms(result); mms.next_non_empty(); ) {
            Node* phi = mms.memory();

// ...

            if (is_decodeN) {
              new_ii = new EncodePNode(ii, narrow_t);
            } else {
              new_ii = new EncodePKlassNode(ii, narrow_t);
            }
            igvn->register_new_node_with_optimizer(new_ii);
          }
        }
        new_phi->set_req(i, new_ii);
      }
      igvn->register_new_node_with_optimizer(new_phi, this);
      if (is_decodeN) {
        progress = new DecodeNNode(new_phi, bottom_type());
      } else {
        progress = new DecodeNKlassNode(new_phi, bottom_type());
      }
    }
  }
#endif

  // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
  if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
    progress = merge_through_phi(this, phase->is_IterGVN());
  }

  return progress;              // Return any progress
}

Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
  Node_Stack stack(1);
  VectorSet  visited;
  Node_List  node_map;

  stack.push(root_phi, 1); // ignore control
  visited.set(root_phi->_idx);

  Node* new_phi = new PhiNode(root_phi->in(0), t);
  node_map.map(root_phi->_idx, new_phi);

  while (stack.is_nonempty()) {

// ...

        }
      } else if (in->Opcode() == Op_VectorBox) {
        VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
        if (cached_vbox == NULL) {
          cached_vbox = vbox;
        } else if (vbox->vec_type() != cached_vbox->vec_type()) {
          // TODO: vector type mismatch can be handled with additional reinterpret casts
          assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
          return NULL; // not optimizable: vector type mismatch
        } else if (vbox->box_type() != cached_vbox->box_type()) {
          assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
          return NULL; // not optimizable: box type mismatch
        }
      } else {
        return NULL; // not optimizable: neither Phi nor VectorBox
      }
    } else {
      stack.pop();
    }
  }
  assert(cached_vbox != NULL, "sanity");
  const TypeInstPtr* btype = cached_vbox->box_type();
  const TypeVect* vtype = cached_vbox->vec_type();
  Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box, igvn);
  Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
  return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
}
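
// For intuition, a sketch of the reboxing merge:
//
//   Phi(VectorBox(box1, vec1), VectorBox(box2, vec2))
//     ==>  VectorBox(Phi(box1, box2), Phi(vec1, vec2))
//
// so a single VectorBox dominates the merge and the boxing can later be
// eliminated or rematerialized per use.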

bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
  // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
  // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
  // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
  assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
  const bool is_loop = (r->is_Loop() && r->req() == 3);
  const Node* top = phase->C->top();
  if (is_loop) {
    return !uin->eqv_uncast(in(LoopNode::EntryControl));
  } else {
    // We have a data loop either with an unsafe data reference or if a region is unreachable.
    return is_unsafe_data_reference(uin)

// ...

  return in(0)->in(0);
}


#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ", _handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.
  CallNode *call = in(1)->in(0)->as_Call();

  return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
    ? this
    : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
  if (!in(0) || in(0)->is_top()) return Type::TOP;
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Check for no longer being part of a loop
Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && !in(0)->is_Region()) {
    // Dead code elimination can sometimes delete this projection so

//=============================================================================
// Below is a second copy of this listing: the same code as adapted for the
// inline-types (Valhalla) build, kept for comparison with the version above.
//=============================================================================

 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/vmError.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.
const Type* RegionNode::Value(PhaseGVN* phase) const {

// ...
  Node *n = (Node*)phase->C->root();
  nstack.push(n);
  visited.set(n->_idx);
  while (nstack.size() != 0) {
    n = nstack.pop();
    uint max = n->outcnt();
    for (uint i = 0; i < max; i++) {
      Node* m = n->raw_out(i);
      if (m != NULL && m->is_CFG()) {
        if (m == this) {
          return false; // We reached the Region node - it is not dead.
        }
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return true; // The Region node is unreachable - it is dead.
}

Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  // cmpP with 1 top input
  //           |
  //          If
  //         /  \
  //   IfFalse  IfTrue  /- Some Node
  //         \  /      /    /
  //        Region    / /-MergeMem
  //             \---Phi
  //
  //
  // PhaseStringOpts expects the Region to go away and be replaced by the If's
  // control input, but because there's still a Phi, the Region stays in the
  // graph. The top input from the cmpP is propagated forward, and a useful
  // subgraph goes away. The code below replaces the Phi with the MergeMem so
  // that the Region is simplified.

  if (type() == Type::MEMORY && is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(req() == 3, "same as region");
    Node* r = in(0);
    for (uint i = 1; i < 3; ++i) {
      Node *mem = in(i);
      if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i;
        Node* other = in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          return m;
        }
      }
    }
  }
  return NULL;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return NULL;  // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for a RegionNode with no Phi users whose inputs come from both
  // arms of the same If.  If found, the control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    has_phis = (has_phi() != NULL);       // Cache result
    if (has_phis) {
      PhiNode* phi = has_unique_phi();
      if (phi != NULL) {
        Node* m = phi->try_clean_mem_phi(phase);
        if (m != NULL) {
          phase->is_IterGVN()->replace_node(phi, m);
          has_phis = false;
        }
      }
    }

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
        for( uint j=i+1; j<req(); j++ ) {
          if( in(j) && in(j)->in(0) == iff &&
              if1->Opcode() != in(j)->Opcode() ) {
            // Add the IF Projections to the worklist. They (and the IF itself)
            // will be eliminated if dead.
            phase->is_IterGVN()->add_users_to_worklist(iff);
            set_req(i, iff->in(0));// Skip around the useless IF diamond
            set_req(j, NULL);
            return this;      // Record progress
          }
        }
      }

// ...

  if (iff1 == iff2) {
    igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
    igvn->replace_input_of(region, idx1, iff1->in(0));
    igvn->replace_input_of(region, idx2, igvn->C->top());
    return (region == this); // Remove useless if (both projections map to the same control/value)
  }
  BoolNode* bol1 = iff1->in(1)->isa_Bool();
  BoolNode* bol2 = iff2->in(1)->isa_Bool();
  if (bol1 == NULL || bol2 == NULL) {
    return false; // No bool inputs found
  }
  Node* cmp1 = bol1->in(1);
  Node* cmp2 = bol2->in(1);
  bool commute = false;
  if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
    return false; // No comparison
  } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
             cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
             cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
             cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
             cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
             cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
    // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
    // SubTypeCheck and FlatArrayCheck are not commutative.
    return false;
  } else if (cmp1 != cmp2) {
    if (cmp1->in(1) == cmp2->in(2) &&
        cmp1->in(2) == cmp2->in(1)) {
      commute = true; // Same but swapped inputs, commute the test
    } else {
      return false; // Ifs are not comparing the same values
    }
  }
  proj1 = proj1->other_if_proj();
  proj2 = proj2->other_if_proj();
  if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
         proj2->unique_ctrl_out_or_null() == this) ||
        (proj2->unique_ctrl_out_or_null() == iff1 &&
         proj1->unique_ctrl_out_or_null() == this))) {
    return false; // Ifs are not connected through other projs
  }
  // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

// ...

//=============================================================================
// note that these functions assume that the _adr_type field is flattened
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
bool PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == NULL || at == TypePtr::BOTTOM) return at;
  return Compile::current()->alias_type(at)->adr_type();
}

//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) ||
         (flatten_phi_adr_type(at) == TypeAryPtr::INLINES &&
          Compile::current()->flattened_accesses_share_alias()), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != NULL)
      p->init_req(j, x);
  }
  return p;
}
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}

// ...

      np->as_Phi()->verify_adr_type(visited, at);
    } else if (n->bottom_type() == Type::TOP
               || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
      // ignore top inputs
    } else {
      const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
      // recheck phi/non-phi consistency at leaves:
      assert((nat != NULL) == (at != NULL), "");
      assert(nat == at || nat == TypePtr::BOTTOM,
             "adr_type must be consistent at leaves of phi nest");
    }
  }
}

// Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing

  assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
  // Flat array elements shouldn't get their own memory slice until flattened_accesses_share_alias
  // is cleared. It could be that the graph has no loads/stores and flattened_accesses_share_alias
  // is never cleared. EA could still create per-element Phis, but that wouldn't be a problem as
  // there are no memory accesses for that array.
  assert(_adr_type == NULL || _adr_type->isa_aryptr() == NULL ||
         _adr_type->is_aryptr()->is_known_instance() ||
         !_adr_type->is_aryptr()->is_flat() ||
         !Compile::current()->flattened_accesses_share_alias() ||
         _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
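
  // For intuition (a sketch): while flattened_accesses_share_alias is set, all
  // flat-array element accesses go through the single shared
  // TypeAryPtr::INLINES slice, so a memory Phi for a flat element must carry
  // that shared adr_type rather than a per-field slice of its own.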

  if (!VerifyAliases)   return;  // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);              // RegionNode
  if( !r )                      // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;

// ...

        }
      }
    } else if (l->in(LoopNode::LoopBackControl) != NULL &&
               in(LoopNode::EntryControl) != NULL &&
               phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
      // During CCP, if we saturate the type of a counted loop's Phi
      // before the special code for counted loops above has a chance
      // to run (that is, as long as the type of the backedge's control
      // is top), we might end up with non-monotonic types.
      return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
    }
  }

  // Until we have harmony between classes and interfaces in the type
  // lattice, we must tread carefully around phis which implicitly
  // convert the one to the other.
  const TypePtr* ttp = _type->make_ptr();
  const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
  const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
  bool is_intf = false;
  if (ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
    is_intf = true;
  } else if (ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
    is_intf = true;
  }

  // Default case: merge all inputs
  const Type *t = Type::TOP;        // Merged type starting value
  for (uint i = 1; i < req(); ++i) {// For all paths in
    // Reachable control path?
    if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
      const Type* ti = phase->type(in(i));
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface.  This might not be true of the meet
      // of all the input types.  The lattice is not distributive in
      // such cases.  Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
      const TypePtr* tip = ti->make_ptr();
      const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;

// ...

  // (Occurrences of this case suggest improvements to Value methods.)
  //
  // It is not possible to see Type::BOTTOM values as phi inputs,
  // because the ciTypeFlow pre-pass produces verifier-quality types.
  const Type* ft = t->filter_speculative(_type);  // Worst case type

#ifdef ASSERT
  // The following logic has been moved into TypeOopPtr::filter.
  const Type* jt = t->join_speculative(_type);
  if (jt->empty()) {           // Emptied out???

    // Check for evil case of 't' being a class and '_type' expecting an
    // interface.  This can happen because the bytecodes do not contain
    // enough type info to distinguish a Java-level interface variable
    // from a Java-level object variable.  If we meet 2 classes which
    // both implement interface I, but their meet is at 'j/l/O' which
    // doesn't implement I, we have no way to tell if the result should
    // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
    // into a Phi which "knows" it's an Interface type we'll have to
    // uplift the type.
    if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
      assert(ft == _type, "");    // Uplift to interface
    } else if (!t->empty() && ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
      assert(ft == _type, "");    // Uplift to interface
    } else {
      // We also have to handle 'evil cases' of interface- vs. class-arrays
      Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
      if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
        assert(ft == _type, "");   // Uplift to array of interface
      } else {
        // Otherwise it's something stupid like non-overlapping int ranges
        // found on dying counted loops.
        assert(ft == Type::TOP, ""); // Canonical empty value
      }
    }
  }

  else {

    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class.  However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out.  This isn't possible but happens

// ...

Node* PhiNode::Identity(PhaseGVN* phase) {
  // Check for no merging going on
  // (There used to be special-case code here when this->region->is_Loop.
  // It would check for a tributary phi on the backedge that fed the main phi
  // trivially, perhaps with a single cast.  The unique_input method
  // does all this and more, by reducing such tributaries to 'this'.)
  Node* uin = unique_input(phase, false);
  if (uin != NULL) {
    return uin;
  }

  int true_path = is_diamond_phi();
  // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
  if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
    Node* id = is_cmove_id(phase, true_path);
    if (id != NULL) {
      return id;
    }
  }

  if (phase->is_IterGVN()) {
    Node* m = try_clean_mem_phi(phase);
    if (m != NULL) {
      return m;
    }
  }

  // Looking for phis with identical inputs.  If we find one that has
  // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
  if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
      TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
    uint phi_len = req();
    Node* phi_reg = region();
    for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
      Node* u = phi_reg->fast_out(i);
      if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
          u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
          u->req() == phi_len) {
        for (uint j = 1; j < phi_len; j++) {
          if (in(j) != u->in(j)) {
            u = NULL;
            break;
          }
        }
        if (u != NULL) {
          return u;
        }

// ...

        } else if (rc->in(0)->in(1) != NULL &&
                   rc->in(0)->in(1)->is_Bool()) {
          if (worklist.member(rc->in(0)->in(1))) {
            delay = true;
          } else if (rc->in(0)->in(1)->in(1) != NULL &&
                     rc->in(0)->in(1)->in(1)->is_Cmp()) {
            if (worklist.member(rc->in(0)->in(1)->in(1))) {
              delay = true;
            }
          }
        }
      }
    }
  }
  if (delay) {
    worklist.push(this);
  }
  return delay;
}

// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
InlineTypeBaseNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk, bool is_init) {
  InlineTypeBaseNode* vt = NULL;
  if (_type->isa_ptr()) {
    vt = InlineTypePtrNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
  } else {
    vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
  }
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      imax -= u->replace_edge(this, vt);
      --i;
    }
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      n = InlineTypePtrNode::make_null(*phase, vk);
    } else if (n->is_Phi()) {
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk, is_init));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypePtrNode
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineTypePtr()->get_oop(), phase);
      n = n->clone();
      n->as_InlineTypePtr()->set_oop(phase->transform(cast));
      n = phase->transform(n);
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    vt->merge_with(phase, n->as_InlineTypeBase(), i, transform);
  }
  return vt;
}
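
// For intuition, a sketch of the push-down: a merge of inline-type values
//
//   Phi(InlineType(f1=a1, f2=b1), InlineType(f1=a2, f2=b2))
//
// becomes an InlineType whose fields are per-field phis,
//
//   InlineType(f1=Phi(a1, a2), f2=Phi(b1, b2))
//
// (built via clone_with_phis/merge_with above), so loads from the merged
// value can fold field by field.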

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node *r = in(0);              // RegionNode
  assert(r != NULL && r->is_Region(), "this phi must have a region");
  assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP ) // Dead code?
    return NULL;                // No change

  Node *top = phase->C->top();
  bool new_phi = (outcnt() == 0); // transforming new Phi
  // No change for igvn if new phi is not hooked
  if (new_phi && can_reshape)
    return NULL;

  // There are 2 situations when only one valid phi's input is left

// ...

          for (uint i = 1; i < req(); i++) {
            offset->init_req(i, in(i)->in(AddPNode::Offset));
          }
          phase->is_IterGVN()->register_new_node_with_optimizer(offset);
        }
        return new AddPNode(base, address, offset);
      }
    }
  }

  // Split phis through memory merges, so that the memory merges will go away.
  // Piggy-back this transformation on the search for a unique input....
  // It will be as if the merged memory is the unique value of the phi.
  // (Do not attempt this optimization unless parsing is complete.
  // It would make the parser's memory-merge logic sick.)
  // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
  if (progress == NULL && can_reshape && type() == Type::MEMORY) {
    // see if this phi should be sliced
    uint merge_width = 0;
    bool saw_self = false;
    // TODO revisit this with JDK-8247216
    bool mergemem_only = true;
    for( uint i=1; i<req(); ++i ) {// For all paths in
      Node *ii = in(i);
      // TOP inputs should not be counted as safe inputs because if the
      // Phi references itself through all other inputs then splitting the
      // Phi through memory merges would create a dead loop at a later stage.
      if (ii == top) {
        return NULL; // Delay optimization until graph is cleaned.
      }
      if (ii->is_MergeMem()) {
        MergeMemNode* n = ii->as_MergeMem();
        merge_width = MAX2(merge_width, n->req());
        saw_self = saw_self || (n->base_memory() == this);
      } else {
        mergemem_only = false;
      }
    }

    // This restriction is temporarily necessary to ensure termination:
    if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;

    if (merge_width > Compile::AliasIdxRaw) {
      // found at least one non-empty MergeMem
      const TypePtr* at = adr_type();
      if (at != TypePtr::BOTTOM) {
        // Patch the existing phi to select an input from the merge:
        // Phi:AT1(...MergeMem(m0, m1, m2)...) into
        //     Phi:AT1(...m1...)
        int alias_idx = phase->C->get_alias_index(at);
        for (uint i=1; i<req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            // compress paths and change unreachable cycles to TOP
            // If not, we can update the input infinitely along a MergeMem cycle
            // Equivalent code is in MemNode::Ideal_common
            Node *m = phase->transform(n);
            if (outcnt() == 0) {  // Above transform() may kill us!
              return top;
            }

// ...

        if (!saw_safe_input) {
          // There is a dead loop: All inputs are either dead or reference back to this phi
          return top;
        }

        // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
        //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
        PhaseIterGVN* igvn = phase->is_IterGVN();
        assert(igvn != NULL, "sanity check");
        Node* hook = new Node(1);
        PhiNode* new_base = (PhiNode*) clone();
        // Must eagerly register phis, since they participate in loops.
        igvn->register_new_node_with_optimizer(new_base);
        hook->add_req(new_base);

        MergeMemNode* result = MergeMemNode::make(new_base);
        for (uint i = 1; i < req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            if (igvn) {
              // TODO revisit this with JDK-8247216
              // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
              igvn->_worklist.push(n);
            }
            for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
              // If we have not seen this slice yet, make a phi for it.
              bool made_new_phi = false;
              if (mms.is_empty()) {
                Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
                made_new_phi = true;
                igvn->register_new_node_with_optimizer(new_phi);
                hook->add_req(new_phi);
                mms.set_memory(new_phi);
              }
              Node* phi = mms.memory();
              assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
              phi->set_req(i, mms.memory2());
            }
          }
        }
        // Distribute all self-loops.
        { // (Extra braces to hide mms.)
          for (MergeMemStream mms(result); mms.next_non_empty(); ) {
            Node* phi = mms.memory();

// ...

            if (is_decodeN) {
              new_ii = new EncodePNode(ii, narrow_t);
            } else {
              new_ii = new EncodePKlassNode(ii, narrow_t);
            }
            igvn->register_new_node_with_optimizer(new_ii);
          }
        }
        new_phi->set_req(i, new_ii);
      }
      igvn->register_new_node_with_optimizer(new_phi, this);
      if (is_decodeN) {
        progress = new DecodeNNode(new_phi, bottom_type());
      } else {
        progress = new DecodeNKlassNode(new_phi, bottom_type());
      }
    }
  }
#endif

  // Check recursively if inputs are either an inline type, constant null
  // or another Phi (including self references through data loops). If so,
  // push the inline types down through the phis to enable folding of loads.
  if (EnableValhalla && (_type->isa_ptr() || _type->isa_inlinetype()) && req() > 2) {
    ResourceMark rm;
    Unique_Node_List worklist;
    worklist.push(this);
    bool can_optimize = true;
    ciInlineKlass* vk = NULL;
    // true if all IsInit inputs of all InlineType* nodes are true
    bool is_init = true;
    Node_List casts;

    // TODO 8284443 We need to prevent endless pushing through
    // TestLWorld -XX:+UseZGC -DScenarios=0 -DTest=test69
    // TestLWorld -XX:-TieredCompilation -XX:-DoEscapeAnalysis -XX:+AlwaysIncrementalInline
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* n = fast_out(i);
      if (n->is_InlineTypePtr() && n->in(1) == this) {
        can_optimize = false;
        break;
      }
    }
    // TODO 8284443 We could revisit the same node over and over again, right?
    for (uint next = 0; next < worklist.size() && can_optimize; next++) {
      Node* phi = worklist.at(next);
      for (uint i = 1; i < phi->req() && can_optimize; i++) {
        Node* n = phi->in(i);
        if (n == NULL) {
          can_optimize = false;
          break;
        }
        while (n->is_ConstraintCast()) {
          if (n->in(0) != NULL && n->in(0)->is_top()) {
            // Will die, don't optimize
            can_optimize = false;
            break;
          }
          casts.push(n);
          n = n->in(1);
        }
        const Type* t = phase->type(n);
        if (n->is_InlineTypeBase() && n->as_InlineTypeBase()->can_merge() &&
            (vk == NULL || vk == t->inline_klass())) {
          vk = (vk == NULL) ? t->inline_klass() : vk;
          if (phase->find_int_con(n->as_InlineTypeBase()->get_is_init(), 0) != 1) {
            is_init = false;
          }
        } else if (n->is_Phi() && can_reshape && (n->bottom_type()->isa_ptr() || n->bottom_type()->isa_inlinetype())) {
          worklist.push(n);
        } else if (t->is_zero_type()) {
          is_init = false;
        } else {
          can_optimize = false;
        }
      }
    }
    // Check if cast nodes can be pushed through
    const Type* t = Type::get_const_type(vk);
    while (casts.size() != 0 && can_optimize && t != NULL) {
      Node* cast = casts.pop();
      if (t->filter(cast->bottom_type()) == Type::TOP) {
        can_optimize = false;
      }
    }
    if (can_optimize && vk != NULL) {
      // TODO 8275400
      // assert(!_type->isa_ptr() || _type->maybe_null() || is_init, "Phi not null but a possible null was seen");
      return push_inline_types_through(phase, can_reshape, vk, is_init);
    }
  }
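
  // For intuition, a Java-level sketch of a shape that qualifies (MyValue is
  // a hypothetical inline type):
  //
  //   MyValue v = cond ? new MyValue(...) : null;
  //
  // Every input is an inline type, null, or another Phi, so the merge can be
  // rewritten by push_inline_types_through(); the null path clears is_init,
  // which the rewritten node tracks through its IsInit input.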

  // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
  if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
    progress = merge_through_phi(this, phase->is_IterGVN());
  }

  return progress;              // Return any progress
}

Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
  Node_Stack stack(1);
  VectorSet  visited;
  Node_List  node_map;

  stack.push(root_phi, 1); // ignore control
  visited.set(root_phi->_idx);

  Node* new_phi = new PhiNode(root_phi->in(0), t);
  node_map.map(root_phi->_idx, new_phi);

  while (stack.is_nonempty()) {

// ...

        }
      } else if (in->Opcode() == Op_VectorBox) {
        VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
        if (cached_vbox == NULL) {
          cached_vbox = vbox;
        } else if (vbox->vec_type() != cached_vbox->vec_type()) {
          // TODO: vector type mismatch can be handled with additional reinterpret casts
          assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
          return NULL; // not optimizable: vector type mismatch
        } else if (vbox->box_type() != cached_vbox->box_type()) {
          assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
          return NULL; // not optimizable: box type mismatch
        }
      } else {
        return NULL; // not optimizable: neither Phi nor VectorBox
      }
    } else {
      stack.pop();
    }
  }

  assert(cached_vbox != NULL, "sanity");
  const TypeInstPtr* btype = cached_vbox->box_type();
  const TypeVect* vtype = cached_vbox->vec_type();
  Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box, igvn);
  Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
  return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
}

bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
  // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
  // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
  // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
  assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
  const bool is_loop = (r->is_Loop() && r->req() == 3);
  const Node* top = phase->C->top();
  if (is_loop) {
    return !uin->eqv_uncast(in(LoopNode::EntryControl));
  } else {
    // We have a data loop either with an unsafe data reference or if a region is unreachable.
    return is_unsafe_data_reference(uin)

// ...

  return in(0)->in(0);
}


#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ", _handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.

  // CheckCastPPNode::Ideal() for inline types reuses the exception
  // paths of a call to perform an allocation: we can see a Phi here.
  if (in(1)->is_Phi()) {
    return this;
  }
  CallNode *call = in(1)->in(0)->as_Call();

  return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
    ? this
    : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
  if (!in(0) || in(0)->is_top()) return Type::TOP;
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Check for no longer being part of a loop
Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && !in(0)->is_Region()) {
    // Dead code elimination can sometimes delete this projection so