/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

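// Expand the enqueue and load reference barriers added by ShenandoahBarrierSetC2, if any:
// run a PhaseIdealLoop pass in LoopOptsShenandoahExpand mode, then optionally another round
// of loop optimizations over the expanded graph.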
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->enqueue_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
    C->clear_major_progress();
    PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    PhaseIdealLoop::verify(igvn);
    DEBUG_ONLY(verify_raw_mem(C->root());)
    if (attempt_more_loopopts) {
      C->set_major_progress();
      int cnt = 0;
      if (!C->optimize_loops(cnt, igvn, LoopOptsShenandoahPostExpand)) {
        return false;
      }
      C->clear_major_progress();
      if (C->range_check_cast_count() > 0) {
        // No more loop optimizations. Remove all range check dependent CastIINodes.
        C->remove_range_check_casts(igvn);
        igvn.optimize();
      }
    }
  }
  return true;
}

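// Recognize the control flow shape emitted by test_gc_state() below: an If on
// ((gc_state & mask) != 0), where gc_state is the thread local gc-state byte.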
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

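// Recognize a byte load of the gc-state field from thread local data.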
bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

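// Walk control inputs up from start (which stop must dominate) and report whether a
// safepoint other than a leaf call is found on any path before reaching stop.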
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

#ifdef ASSERT
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("NULL");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) {
      if (trace) {tty->print_cr("Java mirror");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != NULL) {
    n1->dump(+10);
  }
  if (n2 != NULL) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited(Thread::current()->resource_area());
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.Reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = NULL;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_HasNegatives,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.Reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      if (m->is_Store() || m->is_LoadStore()) {
        // Take anti-dependencies into account
        Node* mem = m->in(MemNode::Memory);
        for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
          Node* u = mem->fast_out(i);
          if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
              phase->ctrl_or_self(u) == c) {
            wq.push(u);
          }
        }
      }
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

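// Step one node up the memory graph along the given alias slice.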
Node* next_mem(Node* mem, int alias) {
  Node* res = NULL;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

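// Walk the dominator chain from c up to dom. Returns NULL if no control flow split is found
// on the path (uncommon trap projections excepted), the single tolerated If projection when
// allow_one_proj is true, or NodeSentinel if an unsupported control shape is encountered.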
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = NULL;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == NULL) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

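// Follow memory inputs from mem until reaching a memory state whose control strictly
// dominates ctrl. Returns NULL if the walk loops back on a state already seen.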
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq(Thread::current()->resource_area());
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return NULL;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

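// Walk up the dominator tree from ctrl to the nearest memory state on the bottom (all slices)
// memory: a bottom memory Phi, a call's bottom memory projection, or the memory projection
// of a safepoint, membar or start node.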
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = NULL;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      Node* phi_bottom = NULL;
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != NULL) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == NULL) {
              mem = projs.fallthrough_memproj;
            } else {
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != NULL &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
              assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
              assert(mem == NULL, "only one proj");
              mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == NULL);
  return mem;
}

void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

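// Replace the outer strip mined loop head and its loop end with a plain LoopNode/IfNode and
// clear the inner loop's strip mined bit, so the loop nest no longer appears strip mined.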
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

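// Emit the runtime check ((gc_state & flags) != 0) on the thread local gc-state byte and
// split control: on return, ctrl is the path where the flags are set and test_fail_ctrl
// the path where they are clear.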
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread          = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
                                        TypeInt::BYTE, MemNode::unordered);
  Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);

  IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                  = new IfTrueNode(gc_state_iff);
  test_fail_ctrl        = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff,   loop, old_ctrl);
  phase->register_control(ctrl,           loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread,        old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state,      old_ctrl);
  phase->register_new_node(gc_state_and,  old_ctrl);
  phase->register_new_node(gc_state_cmp,  old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  phase->set_ctrl(gc_state_offset, phase->C->root());

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}

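// Emit a null check on val if its type may include NULL: on return, ctrl is the non-null
// path and null_ctrl the null path. Nothing is emitted if val is known to be non-null.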
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);

    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl             = new IfTrueNode(null_iff);
    null_ctrl        = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff,  loop, old_ctrl);
    phase->register_control(ctrl,      loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp,  old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}

Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  val = new CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}

void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  for(uint next = 0; next < uses.size(); next++ ) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}

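// Emit the collection set membership test: index the in-cset fast-test table with the object
// address shifted by the region size shift, load the byte and compare it against zero. On
// return, ctrl is the in-cset path and not_cset_ctrl the path for objects outside the cset.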
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  Node* raw_val        = new CastP2XNode(old_ctrl, val);
  Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
  Node* cset_addr      = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_load_addr = new AddPNode(phase->C->top(), cset_addr, cset_idx);
  Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_addr,
                                       DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
                                       TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);

  IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                 = new IfTrueNode(cset_iff);
  not_cset_ctrl        = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff,      loop, old_ctrl);
  phase->register_control(ctrl,          loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  phase->set_ctrl(cset_addr, phase->C->root());

  phase->register_new_node(raw_val,        old_ctrl);
  phase->register_new_node(cset_idx,       old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load,      old_ctrl);
  phase->register_new_node(cset_cmp,       old_ctrl);
  phase->register_new_node(cset_bool,      old_ctrl);
}

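// Expand the load reference barrier slow path as a leaf call into the runtime: ctrl, val and
// result_mem are replaced with the call's projections, with the returned oop cast back to
// the original object type.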
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
  IdealLoopTree*loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);
  MergeMemNode* mm = MergeMemNode::make(base);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);

  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(),
                                target,
                                "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

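// n has been cloned as n_clone along the exception path of a call. Build a phi at the region
// between c and ctrl that selects n on inputs dominated by the call's fall-through catch
// projection and n_clone on inputs dominated by the catch-all projection, recursing for
// inputs dominated by neither.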
1116 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1117   Node* region = NULL;
1118   while (c != ctrl) {
1119     if (c->is_Region()) {
1120       region = c;
1121     }
1122     c = phase->idom(c);
1123   }
1124   assert(region != NULL, "");
1125   Node* phi = new PhiNode(region, n->bottom_type());
1126   for (uint j = 1; j < region->req(); j++) {
1127     Node* in = region->in(j);
1128     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1129       phi->init_req(j, n);
1130     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1131       phi->init_req(j, n_clone);
1132     } else {
1133       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1134     }
1135   }
1136   phase->register_new_node(phi, region);
1137   return phi;
1138 }
1139 
1140 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1141   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1142 
1143   Unique_Node_List uses;
1144   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1145     Node* barrier = state->enqueue_barrier(i);
1146     Node* ctrl = phase->get_ctrl(barrier);
1147     IdealLoopTree* loop = phase->get_loop(ctrl);
1148     if (loop->_head->is_OuterStripMinedLoop()) {
1149       // Expanding a barrier here will break loop strip mining
1150       // verification. Transform the loop so the loop nest doesn't
1151       // appear as strip mined.
1152       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1153       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1154     }
1155   }
1156 
1157   Node_Stack stack(0);
1158   Node_List clones;
1159   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1160     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1161     if (lrb->is_redundant()) {
1162       continue;
1163     }
1164 
1165     Node* ctrl = phase->get_ctrl(lrb);
1166     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1167 
1168     CallStaticJavaNode* unc = NULL;
1169     Node* unc_ctrl = NULL;
1170     Node* uncasted_val = val;
1171 
1172     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1173       Node* u = lrb->fast_out(i);
1174       if (u->Opcode() == Op_CastPP &&
1175           u->in(0) != NULL &&
1176           phase->is_dominator(u->in(0), ctrl)) {
1177         const Type* u_t = phase->igvn().type(u);
1178 
1179         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1180             u->in(0)->Opcode() == Op_IfTrue &&
1181             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1182             u->in(0)->in(0)->is_If() &&
1183             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1184             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1185             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1186             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1187             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1188           IdealLoopTree* loop = phase->get_loop(ctrl);
1189           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1190 
1191           if (!unc_loop->is_member(loop)) {
1192             continue;
1193           }
1194 
1195           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1196           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1197           if (branch == NodeSentinel) {
1198             continue;
1199           }
1200 
1201           phase->igvn().replace_input_of(u, 1, val);
1202           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1203           phase->set_ctrl(u, u->in(0));
1204           phase->set_ctrl(lrb, u->in(0));
1205           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1206           unc_ctrl = u->in(0);
1207           val = u;
1208 
1209           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1210             Node* u = val->fast_out(j);
1211             if (u == lrb) continue;
1212             phase->igvn().rehash_node_delayed(u);
1213             int nb = u->replace_edge(val, lrb);
1214             --j; jmax -= nb;
1215           }
1216 
1217           RegionNode* r = new RegionNode(3);
1218           IfNode* iff = unc_ctrl->in(0)->as_If();
1219 
1220           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1221           Node* unc_ctrl_clone = unc_ctrl->clone();
1222           phase->register_control(unc_ctrl_clone, loop, iff);
1223           Node* c = unc_ctrl_clone;
1224           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1225           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1226 
1227           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1228           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1229           phase->lazy_replace(c, unc_ctrl);
1230           c = NULL;;
1231           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1232           phase->set_ctrl(val, unc_ctrl_clone);
1233 
1234           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1235           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1236           Node* iff_proj = iff->proj_out(0);
1237           r->init_req(2, iff_proj);
1238           phase->register_control(r, phase->ltree_root(), iff);
1239 
1240           Node* new_bol = new_iff->in(1)->clone();
1241           Node* new_cmp = new_bol->in(1)->clone();
1242           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1243           assert(new_cmp->in(1) == val->in(1), "broken");
1244           new_bol->set_req(1, new_cmp);
1245           new_cmp->set_req(1, lrb);
1246           phase->register_new_node(new_bol, new_iff->in(0));
1247           phase->register_new_node(new_cmp, new_iff->in(0));
1248           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1249           phase->igvn().replace_input_of(new_cast, 1, lrb);
1250 
1251           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1252             Node* u = lrb->fast_out(i);
1253             if (u == new_cast || u == new_cmp) {
1254               continue;
1255             }
1256             phase->igvn().rehash_node_delayed(u);
1257             int nb = u->replace_edge(lrb, new_cast);
1258             assert(nb > 0, "no update?");
1259             --i; imax -= nb;
1260           }
1261 
1262           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1263             Node* u = val->fast_out(i);
1264             if (u == lrb) {
1265               continue;
1266             }
1267             phase->igvn().rehash_node_delayed(u);
1268             int nb = u->replace_edge(val, new_cast);
1269             assert(nb > 0, "no update?");
1270             --i; imax -= nb;
1271           }
1272 
1273           ctrl = unc_ctrl_clone;
1274           phase->set_ctrl_and_loop(lrb, ctrl);
1275           break;
1276         }
1277       }
1278     }
1279     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1280       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1281       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1282         // The rethrow call may have too many projections to be
1283         // properly handled here. Given there's no reason for a
1284         // barrier to depend on the call, move it above the call.
1285         stack.push(lrb, 0);
1286         do {
1287           Node* n = stack.node();
1288           uint idx = stack.index();
1289           if (idx < n->req()) {
1290             Node* in = n->in(idx);
1291             stack.set_index(idx+1);
1292             if (in != NULL) {
1293               if (phase->has_ctrl(in)) {
1294                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1295 #ifdef ASSERT
1296                   for (uint i = 0; i < stack.size(); i++) {
1297                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1298                   }
1299 #endif
1300                   stack.push(in, 0);
1301                 }
1302               } else {
1303                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1304               }
1305             }
1306           } else {
1307             phase->set_ctrl(n, call->in(0));
1308             stack.pop();
1309           }
1310         } while(stack.size() > 0);
1311         continue;
1312       }
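           // The barrier is pinned on a projection of a Java call: clone the
           // barrier and the data nodes pinned between the call and its catch
           // along the exception path, so both the fall-through and catch-all
           // projections see a locally defined value.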
1313       CallProjections projs;
1314       call->extract_projections(&projs, false, false);
1315 
1316 #ifdef ASSERT
1317       VectorSet cloned(Thread::current()->resource_area());
1318 #endif
1319       Node* lrb_clone = lrb->clone();
1320       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1321       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1322 
1323       stack.push(lrb, 0);
1324       clones.push(lrb_clone);
1325 
1326       do {
1327         assert(stack.size() == clones.size(), "");
1328         Node* n = stack.node();
1329 #ifdef ASSERT
1330         if (n->is_Load()) {
1331           Node* mem = n->in(MemNode::Memory);
1332           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1333             Node* u = mem->fast_out(j);
1334             assert(!(u->is_Store() || u->is_LoadStore()) || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1335           }
1336         }
1337 #endif
1338         uint idx = stack.index();
1339         Node* n_clone = clones.at(clones.size()-1);
1340         if (idx < n->outcnt()) {
1341           Node* u = n->raw_out(idx);
1342           Node* c = phase->ctrl_or_self(u);
1343           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1344             stack.set_index(idx+1);
1345             assert(!u->is_CFG(), "");
1346             stack.push(u, 0);
1347             assert(!cloned.test_set(u->_idx), "only one clone");
1348             Node* u_clone = u->clone();
1349             int nb = u_clone->replace_edge(n, n_clone);
1350             assert(nb > 0, "should have replaced some uses");
1351             phase->register_new_node(u_clone, projs.catchall_catchproj);
1352             clones.push(u_clone);
1353             phase->set_ctrl(u, projs.fallthrough_catchproj);
1354           } else {
1355             bool replaced = false;
1356             if (u->is_Phi()) {
1357               for (uint k = 1; k < u->req(); k++) {
1358                 if (u->in(k) == n) {
1359                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1360                     phase->igvn().replace_input_of(u, k, n_clone);
1361                     replaced = true;
1362                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1363                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1364                     replaced = true;
1365                   }
1366                 }
1367               }
1368             } else {
1369               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1370                 phase->igvn().rehash_node_delayed(u);
1371                 int nb = u->replace_edge(n, n_clone);
1372                 assert(nb > 0, "should have replaced some uses");
1373                 replaced = true;
1374               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1375                 if (u->is_If()) {
1376                   // Can't break If/Bool/Cmp chain
1377                   assert(n->is_Bool(), "unexpected If shape");
1378                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1379                   assert(n_clone->is_Bool(), "unexpected clone");
1380                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1381                   Node* bol_clone = n->clone();
1382                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1383                   bol_clone->set_req(1, cmp_clone);
1384 
1385                   Node* nn = stack.node_at(stack.size()-3);
1386                   Node* nn_clone = clones.at(clones.size()-3);
1387                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1388 
1389                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase));
1390                   assert(nb > 0, "should have replaced some uses");
1391 
1392                   phase->register_new_node(bol_clone, u->in(0));
1393                   phase->register_new_node(cmp_clone, u->in(0));
1394 
1395                   phase->igvn().replace_input_of(u, 1, bol_clone);
1396 
1397                 } else {
1398                   phase->igvn().rehash_node_delayed(u);
1399                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1400                   assert(nb > 0, "should have replaced some uses");
1401                 }
1402                 replaced = true;
1403               }
1404             }
1405             if (!replaced) {
1406               stack.set_index(idx+1);
1407             }
1408           }
1409         } else {
1410           stack.pop();
1411           clones.pop();
1412         }
1413       } while (stack.size() > 0);
1414       assert(stack.size() == 0 && clones.size() == 0, "");
1415     }
1416   }
1417 
1418   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1419     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1420     if (lrb->is_redundant()) {
1421       continue;
1422     }
1423     Node* ctrl = phase->get_ctrl(lrb);
1424     IdealLoopTree* loop = phase->get_loop(ctrl);
1425     if (loop->_head->is_OuterStripMinedLoop()) {
1426       // Expanding a barrier here will break loop strip mining
1427       // verification. Transform the loop so the loop nest doesn't
1428       // appear as strip mined.
1429       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1430       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1431     }
1432   }
1433 
1434   // Expand load-reference-barriers
1435   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1436   Unique_Node_List uses_to_ignore;
1437   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1438     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1439     if (lrb->is_redundant()) {
1440       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1441       continue;
1442     }
1443     uint last = phase->C->unique();
1444     Node* ctrl = phase->get_ctrl(lrb);
1445     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1446 
1447 
1448     Node* orig_ctrl = ctrl;
1449 
1450     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1451     Node* init_raw_mem = raw_mem;
1452     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1453 
1454     IdealLoopTree *loop = phase->get_loop(ctrl);
1455     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1456     Node* unc_ctrl = NULL;
1457     if (unc != NULL) {
1458       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1459         unc = NULL;
1460       } else {
1461         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1462       }
1463     }
1464 
1465     Node* uncasted_val = val;
1466     if (unc != NULL) {
1467       uncasted_val = val->in(1);
1468     }
1469 
1470     Node* heap_stable_ctrl = NULL;
1471     Node* null_ctrl = NULL;
1472 
1473     assert(val->bottom_type()->make_oopptr(), "need oop");
1474     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1475 
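         // The expanded barrier merges four paths: heap stable (no forwarded
         // objects), value is NULL, value not in the collection set, and the
         // evacuation slow path that calls the runtime stub.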
1476     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1477     Node* region = new RegionNode(PATH_LIMIT);
1478     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1479     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1480 
1481     // Stable path.
1482     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
1483     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1484 
1485     // Heap stable case
1486     region->init_req(_heap_stable, heap_stable_ctrl);
1487     val_phi->init_req(_heap_stable, uncasted_val);
1488     raw_mem_phi->init_req(_heap_stable, raw_mem);
1489 
1490     Node* reg2_ctrl = NULL;
1491     // Null case
1492     test_null(ctrl, val, null_ctrl, phase);
1493     if (null_ctrl != NULL) {
1494       reg2_ctrl = null_ctrl->in(0);
1495       region->init_req(_null_path, null_ctrl);
1496       val_phi->init_req(_null_path, uncasted_val);
1497       raw_mem_phi->init_req(_null_path, raw_mem);
1498     } else {
1499       region->del_req(_null_path);
1500       val_phi->del_req(_null_path);
1501       raw_mem_phi->del_req(_null_path);
1502     }
1503 
1504     // Test for in-cset.
1505     // Wires !in_cset(obj) to slot 2 of region and phis
1506     Node* not_cset_ctrl = NULL;
1507     test_in_cset(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1508     if (not_cset_ctrl != NULL) {
1509       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1510       region->init_req(_not_cset, not_cset_ctrl);
1511       val_phi->init_req(_not_cset, uncasted_val);
1512       raw_mem_phi->init_req(_not_cset, raw_mem);
1513     }
1514 
1515     // Resolve object when orig-value is in cset.
1516     // Make the unconditional resolve for fwdptr.
1517     Node* new_val = uncasted_val;
1518     if (unc_ctrl != NULL) {
1519       // Clone the null check in this branch to allow implicit null check
1520       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1521       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1522 
1523       IfNode* iff = unc_ctrl->in(0)->as_If();
1524       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1525     }
1526 
1527     // Call the LRB stub and wire that path into the _evac_path slot.
1528     Node* result_mem = NULL;
1529 
1530     Node* fwd = new_val;
1531     Node* addr;
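         // With self-fixing, find the address the value was loaded from so
         // the stub can heal the stale reference in memory; otherwise pass a
         // NULL address.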
1532     if (ShenandoahSelfFixing) {
1533       VectorSet visited(Thread::current()->resource_area());
1534       addr = get_load_addr(phase, visited, lrb);
1535     } else {
1536       addr = phase->igvn().zerocon(T_OBJECT);
1537     }
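         // Rebuild the load address on top of a control-dependent cast of its
         // base so it is pinned at the barrier control. Handle both the simple
         // field-access shape (base == address) and the two-level AddP chain
         // used for array element accesses.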
1538     if (addr->Opcode() == Op_AddP) {
1539       Node* orig_base = addr->in(AddPNode::Base);
1540       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true);
1541       phase->register_new_node(base, ctrl);
1542       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1543         // Field access
1544         addr = addr->clone();
1545         addr->set_req(AddPNode::Base, base);
1546         addr->set_req(AddPNode::Address, base);
1547         phase->register_new_node(addr, ctrl);
1548       } else {
1549         Node* addr2 = addr->in(AddPNode::Address);
1550         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1551               addr2->in(AddPNode::Base) == orig_base) {
1552           addr2 = addr2->clone();
1553           addr2->set_req(AddPNode::Base, base);
1554           addr2->set_req(AddPNode::Address, base);
1555           phase->register_new_node(addr2, ctrl);
1556           addr = addr->clone();
1557           addr->set_req(AddPNode::Base, base);
1558           addr->set_req(AddPNode::Address, addr2);
1559           phase->register_new_node(addr, ctrl);
1560         }
1561       }
1562     }
1563     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, false, phase);
1564     region->init_req(_evac_path, ctrl);
1565     val_phi->init_req(_evac_path, fwd);
1566     raw_mem_phi->init_req(_evac_path, result_mem);
1567 
1568     phase->register_control(region, loop, heap_stable_iff);
1569     Node* out_val = val_phi;
1570     phase->register_new_node(val_phi, region);
1571     phase->register_new_node(raw_mem_phi, region);
1572 
1573     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1574 
1575     ctrl = orig_ctrl;
1576 
1577     if (unc != NULL) {
1578       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1579         Node* u = val->fast_out(i);
1580         Node* c = phase->ctrl_or_self(u);
1581         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1582           phase->igvn().rehash_node_delayed(u);
1583           int nb = u->replace_edge(val, out_val);
1584           --i, imax -= nb;
1585         }
1586       }
1587       if (val->outcnt() == 0) {
1588         phase->igvn()._worklist.push(val);
1589       }
1590     }
1591     phase->igvn().replace_node(lrb, out_val);
1592 
1593     follow_barrier_uses(out_val, ctrl, uses, phase);
1594 
1595     for (uint next = 0; next < uses.size(); next++) {
1596       Node *n = uses.at(next);
1597       assert(phase->get_ctrl(n) == ctrl, "bad control");
1598       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1599       phase->set_ctrl(n, region);
1600       follow_barrier_uses(n, ctrl, uses, phase);
1601     }
1602 
1603     // The slow path call produces memory: hook the raw memory phi
1604     // of the expanded load reference barrier into the rest of the
1605     // graph, which may require adding memory phis at every post-dominated
1606     // region and at enclosing loop heads. Use the memory state
1607     // collected in memory_nodes to fix the memory graph. Update that
1608     // memory state as we go.
1609     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1610   }
1611   // Done expanding load-reference-barriers.
1612   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1613 
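       // Expand enqueue (SATB pre-write) barriers: each becomes a gc-state
       // test for marking, a null check on the pre-value, a fast path that
       // pushes the pre-value into the thread-local SATB queue, and a
       // runtime call when the queue buffer is full.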
1614   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1615     Node* barrier = state->enqueue_barrier(i);
1616     Node* pre_val = barrier->in(1);
1617 
1618     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1619       ShouldNotReachHere();
1620       continue;
1621     }
1622 
1623     Node* ctrl = phase->get_ctrl(barrier);
1624 
1625     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1626       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1627       ctrl = ctrl->in(0)->in(0);
1628       phase->set_ctrl(barrier, ctrl);
1629     } else if (ctrl->is_CallRuntime()) {
1630       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1631       ctrl = ctrl->in(0);
1632       phase->set_ctrl(barrier, ctrl);
1633     }
1634 
1635     Node* init_ctrl = ctrl;
1636     IdealLoopTree* loop = phase->get_loop(ctrl);
1637     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1638     Node* init_raw_mem = raw_mem;
1639     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1640     Node* heap_stable_ctrl = NULL;
1641     Node* null_ctrl = NULL;
1642     uint last = phase->C->unique();
1643 
1644     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1645     Node* region = new RegionNode(PATH_LIMIT);
1646     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1647 
1648     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1649     Node* region2 = new RegionNode(PATH_LIMIT2);
1650     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1651 
1652     // Stable path.
1653     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1654     region->init_req(_heap_stable, heap_stable_ctrl);
1655     phi->init_req(_heap_stable, raw_mem);
1656 
1657     // Null path
1658     Node* reg2_ctrl = NULL;
1659     test_null(ctrl, pre_val, null_ctrl, phase);
1660     if (null_ctrl != NULL) {
1661       reg2_ctrl = null_ctrl->in(0);
1662       region2->init_req(_null_path, null_ctrl);
1663       phi2->init_req(_null_path, raw_mem);
1664     } else {
1665       region2->del_req(_null_path);
1666       phi2->del_req(_null_path);
1667     }
1668 
1669     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1670     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1671     Node* thread = new ThreadLocalNode();
1672     phase->register_new_node(thread, ctrl);
1673     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1674     phase->register_new_node(buffer_adr, ctrl);
1675     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1676     phase->register_new_node(index_adr, ctrl);
1677 
1678     BasicType index_bt = TypeX_X->basic_type();
1679     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1680     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1681     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1682     phase->register_new_node(index, ctrl);
1683     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1684     phase->register_new_node(index_cmp, ctrl);
1685     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1686     phase->register_new_node(index_test, ctrl);
1687     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1688     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1689     phase->register_control(queue_full_iff, loop, ctrl);
1690     Node* not_full = new IfTrueNode(queue_full_iff);
1691     phase->register_control(not_full, loop, queue_full_iff);
1692     Node* full = new IfFalseNode(queue_full_iff);
1693     phase->register_control(full, loop, queue_full_iff);
1694 
1695     ctrl = not_full;
1696 
1697     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1698     phase->register_new_node(next_index, ctrl);
1699 
1700     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1701     phase->register_new_node(buffer, ctrl);
1702     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1703     phase->register_new_node(log_addr, ctrl);
1704     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1705     phase->register_new_node(log_store, ctrl);
1706     // update the index
1707     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1708     phase->register_new_node(index_update, ctrl);
1709 
1710     // Fast-path case
1711     region2->init_req(_fast_path, ctrl);
1712     phi2->init_req(_fast_path, index_update);
1713 
1714     ctrl = full;
1715 
1716     Node* base = find_bottom_mem(ctrl, phase);
1717 
1718     MergeMemNode* mm = MergeMemNode::make(base);
1719     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1720     phase->register_new_node(mm, ctrl);
1721 
1722     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1723     call->init_req(TypeFunc::Control, ctrl);
1724     call->init_req(TypeFunc::I_O, phase->C->top());
1725     call->init_req(TypeFunc::Memory, mm);
1726     call->init_req(TypeFunc::FramePtr, phase->C->top());
1727     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1728     call->init_req(TypeFunc::Parms, pre_val);
1729     call->init_req(TypeFunc::Parms+1, thread);
1730     phase->register_control(call, loop, ctrl);
1731 
1732     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1733     phase->register_control(ctrl_proj, loop, call);
1734     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1735     phase->register_new_node(mem_proj, call);
1736 
1737     // Slow-path case
1738     region2->init_req(_slow_path, ctrl_proj);
1739     phi2->init_req(_slow_path, mem_proj);
1740 
1741     phase->register_control(region2, loop, reg2_ctrl);
1742     phase->register_new_node(phi2, region2);
1743 
1744     region->init_req(_heap_unstable, region2);
1745     phi->init_req(_heap_unstable, phi2);
1746 
1747     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1748     phase->register_new_node(phi, region);
1749 
1750     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1751     for (uint next = 0; next < uses.size(); next++) {
1752       Node *n = uses.at(next);
1753       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1754       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1755       phase->set_ctrl(n, region);
1756       follow_barrier_uses(n, init_ctrl, uses, phase);
1757     }
1758     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1759 
1760     phase->igvn().replace_node(barrier, pre_val);
1761   }
1762   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1763 
1764 }
1765 
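     // Walk up from the barrier input to the address the value was loaded
     // from. Returns NULL for an already visited node, and a zero (NULL oop)
     // constant when no single load address can be determined.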
1766 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1767   if (visited.test_set(in->_idx)) {
1768     return NULL;
1769   }
1770   switch (in->Opcode()) {
1771     case Op_Proj:
1772       return get_load_addr(phase, visited, in->in(0));
1773     case Op_CastPP:
1774     case Op_CheckCastPP:
1775     case Op_DecodeN:
1776     case Op_EncodeP:
1777       return get_load_addr(phase, visited, in->in(1));
1778     case Op_LoadN:
1779     case Op_LoadP:
1780       return in->in(MemNode::Address);
1781     case Op_CompareAndExchangeN:
1782     case Op_CompareAndExchangeP:
1783     case Op_GetAndSetN:
1784     case Op_GetAndSetP:
1785     case Op_ShenandoahCompareAndExchangeP:
1786     case Op_ShenandoahCompareAndExchangeN:
1787       // These instructions would just have stored a different
1788       // value into the field. There is no point in trying to fix it here.
1789       return phase->igvn().zerocon(T_OBJECT);
1790     case Op_CMoveP:
1791     case Op_CMoveN: {
1792       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1793       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1794       // Handle unambiguous cases: single address reported on both branches.
1795       if (t != NULL && f == NULL) return t;
1796       if (t == NULL && f != NULL) return f;
1797       if (t != NULL && t == f)    return t;
1798       // Ambiguity.
1799       return phase->igvn().zerocon(T_OBJECT);
1800     }
1801     case Op_Phi: {
1802       Node* addr = NULL;
1803       for (uint i = 1; i < in->req(); i++) {
1804         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1805         if (addr == NULL) {
1806           addr = addr1;
1807         }
1808         if (addr != addr1) {
1809           return phase->igvn().zerocon(T_OBJECT);
1810         }
1811       }
1812       return addr;
1813     }
1814     case Op_ShenandoahLoadReferenceBarrier:
1815       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1816     case Op_ShenandoahEnqueueBarrier:
1817       return get_load_addr(phase, visited, in->in(1));
1818     case Op_CallDynamicJava:
1819     case Op_CallLeaf:
1820     case Op_CallStaticJava:
1821     case Op_ConN:
1822     case Op_ConP:
1823     case Op_Parm:
1824     case Op_CreateEx:
1825       return phase->igvn().zerocon(T_OBJECT);
1826     default:
1827 #ifdef ASSERT
1828       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1829 #endif
1830       return phase->igvn().zerocon(T_OBJECT);
1831   }
1832 
1833 }
1834 
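     // If the gc-state load feeding this If is not already above the loop,
     // clone the load and its And/Cmp/Bool chain to the loop entry so the
     // loop-invariant test can be unswitched.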
1835 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1836   IdealLoopTree *loop = phase->get_loop(iff);
1837   Node* loop_head = loop->_head;
1838   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1839 
1840   Node* bol = iff->in(1);
1841   Node* cmp = bol->in(1);
1842   Node* andi = cmp->in(1);
1843   Node* load = andi->in(1);
1844 
1845   assert(is_gc_state_load(load), "broken");
1846   if (!phase->is_dominator(load->in(0), entry_c)) {
1847     Node* mem_ctrl = NULL;
1848     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1849     load = load->clone();
1850     load->set_req(MemNode::Memory, mem);
1851     load->set_req(0, entry_c);
1852     phase->register_new_node(load, entry_c);
1853     andi = andi->clone();
1854     andi->set_req(1, load);
1855     phase->register_new_node(andi, entry_c);
1856     cmp = cmp->clone();
1857     cmp->set_req(1, andi);
1858     phase->register_new_node(cmp, entry_c);
1859     bol = bol->clone();
1860     bol->set_req(1, cmp);
1861     phase->register_new_node(bol, entry_c);
1862 
1863     Node* old_bol = iff->in(1);
1864     phase->igvn().replace_input_of(iff, 1, bol);
1865   }
1866 }
1867 
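     // Returns true if n is a heap-stable test whose region only merges paths
     // coming from the projections of another, dominating heap-stable test,
     // i.e. the dominating test already decides the outcome of n.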
1868 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1869   if (!n->is_If() || n->is_CountedLoopEnd()) {
1870     return false;
1871   }
1872   Node* region = n->in(0);
1873 
1874   if (!region->is_Region()) {
1875     return false;
1876   }
1877   Node* dom = phase->idom(region);
1878   if (!dom->is_If()) {
1879     return false;
1880   }
1881 
1882   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1883     return false;
1884   }
1885 
1886   IfNode* dom_if = dom->as_If();
1887   Node* proj_true = dom_if->proj_out(1);
1888   Node* proj_false = dom_if->proj_out(0);
1889 
1890   for (uint i = 1; i < region->req(); i++) {
1891     if (phase->is_dominator(proj_true, region->in(i))) {
1892       continue;
1893     }
1894     if (phase->is_dominator(proj_false, region->in(i))) {
1895       continue;
1896     }
1897     return false;
1898   }
1899 
1900   return true;
1901 }
1902 
1903 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1904   assert(is_heap_stable_test(n), "no other tests");
1905   if (identical_backtoback_ifs(n, phase)) {
1906     Node* n_ctrl = n->in(0);
1907     if (phase->can_split_if(n_ctrl)) {
1908       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1909       if (is_heap_stable_test(n)) {
1910         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1911         assert(is_gc_state_load(gc_state_load), "broken");
1912         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1913         assert(is_gc_state_load(dom_gc_state_load), "broken");
1914         if (gc_state_load != dom_gc_state_load) {
1915           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1916         }
1917       }
1918       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1919       Node* proj_true = dom_if->proj_out(1);
1920       Node* proj_false = dom_if->proj_out(0);
1921       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1922       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1923 
1924       for (uint i = 1; i < n_ctrl->req(); i++) {
1925         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1926           bolphi->init_req(i, con_true);
1927         } else {
1928           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1929           bolphi->init_req(i, con_false);
1930         }
1931       }
1932       phase->register_new_node(bolphi, n_ctrl);
1933       phase->igvn().replace_input_of(n, 1, bolphi);
1934       phase->do_split_if(n);
1935     }
1936   }
1937 }
1938 
1939 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1940   // Find first invariant test that doesn't exit the loop
1941   LoopNode *head = loop->_head->as_Loop();
1942   IfNode* unswitch_iff = NULL;
1943   Node* n = head->in(LoopNode::LoopBackControl);
1944   int loop_has_sfpts = -1;
1945   while (n != head) {
1946     Node* n_dom = phase->idom(n);
1947     if (n->is_Region()) {
1948       if (n_dom->is_If()) {
1949         IfNode* iff = n_dom->as_If();
1950         if (iff->in(1)->is_Bool()) {
1951           BoolNode* bol = iff->in(1)->as_Bool();
1952           if (bol->in(1)->is_Cmp()) {
1953             // If the condition is invariant and not a loop exit,
1954             // we have found a candidate for unswitching.
1955             if (is_heap_stable_test(iff) &&
1956                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1957               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1958               if (loop_has_sfpts == -1) {
1959                 for (uint i = 0; i < loop->_body.size(); i++) {
1960                   Node *m = loop->_body[i];
1961                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1962                     loop_has_sfpts = 1;
1963                     break;
1964                   }
1965                 }
1966                 if (loop_has_sfpts == -1) {
1967                   loop_has_sfpts = 0;
1968                 }
1969               }
1970               if (!loop_has_sfpts) {
1971                 unswitch_iff = iff;
1972               }
1973             }
1974           }
1975         }
1976       }
1977     }
1978     n = n_dom;
1979   }
1980   return unswitch_iff;
1981 }
1982 
1983 
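     // After expansion: collect all heap-stable tests with a DFS over the
     // graph, merge back-to-back tests, and, if no other loop optimization
     // made progress, try to unswitch innermost loops on the loop-invariant
     // gc-state test.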
1984 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1985   Node_List heap_stable_tests;
1986   stack.push(phase->C->start(), 0);
1987   do {
1988     Node* n = stack.node();
1989     uint i = stack.index();
1990 
1991     if (i < n->outcnt()) {
1992       Node* u = n->raw_out(i);
1993       stack.set_index(i+1);
1994       if (!visited.test_set(u->_idx)) {
1995         stack.push(u, 0);
1996       }
1997     } else {
1998       stack.pop();
1999       if (n->is_If() && is_heap_stable_test(n)) {
2000         heap_stable_tests.push(n);
2001       }
2002     }
2003   } while (stack.size() > 0);
2004 
2005   for (uint i = 0; i < heap_stable_tests.size(); i++) {
2006     Node* n = heap_stable_tests.at(i);
2007     assert(is_heap_stable_test(n), "only evacuation test");
2008     merge_back_to_back_tests(n, phase);
2009   }
2010 
2011   if (!phase->C->major_progress()) {
2012     VectorSet seen(Thread::current()->resource_area());
2013     for (uint i = 0; i < heap_stable_tests.size(); i++) {
2014       Node* n = heap_stable_tests.at(i);
2015       IdealLoopTree* loop = phase->get_loop(n);
2016       if (loop != phase->ltree_root() &&
2017           loop->_child == NULL &&
2018           !loop->_irreducible) {
2019         Node* head = loop->_head;
2020         if (head->is_Loop() &&
2021             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
2022             !seen.test_set(head->_idx)) {
2023           IfNode* iff = find_unswitching_candidate(loop, phase);
2024           if (iff != NULL) {
2025             Node* bol = iff->in(1);
2026             if (head->as_Loop()->is_strip_mined()) {
2027               head->as_Loop()->verify_strip_mined(0);
2028             }
2029             move_gc_state_test_out_of_loop(iff, phase);
2030             if (loop->policy_unswitching(phase)) {
2031               if (head->as_Loop()->is_strip_mined()) {
2032                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
2033                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
2034               }
2035               phase->do_unswitching(loop, old_new);
2036             } else {
2037               // Not proceeding with unswitching. Move the load back
2038               // into the loop.
2039               phase->igvn().replace_input_of(iff, 1, bol);
2040             }
2041           }
2042         }
2043       }
2044     }
2045   }
2046 }
2047 
2048 #ifdef ASSERT
2049 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
2050   const bool trace = false;
2051   ResourceMark rm;
2052   Unique_Node_List nodes;
2053   Unique_Node_List controls;
2054   Unique_Node_List memories;
2055 
2056   nodes.push(root);
2057   for (uint next = 0; next < nodes.size(); next++) {
2058     Node *n  = nodes.at(next);
2059     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
2060       controls.push(n);
2061       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
2062       for (uint next2 = 0; next2 < controls.size(); next2++) {
2063         Node *m = controls.at(next2);
2064         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2065           Node* u = m->fast_out(i);
2066           if (u->is_CFG() && !u->is_Root() &&
2067               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
2068               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
2069             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
2070             controls.push(u);
2071           }
2072         }
2073       }
2074       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
2075       for (uint next2 = 0; next2 < memories.size(); next2++) {
2076         Node *m = memories.at(next2);
2077         assert(m->bottom_type() == Type::MEMORY, "");
2078         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2079           Node* u = m->fast_out(i);
2080           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
2081             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2082             memories.push(u);
2083           } else if (u->is_LoadStore()) {
2084             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
2085             memories.push(u->find_out_with(Op_SCMemProj));
2086           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
2087             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2088             memories.push(u);
2089           } else if (u->is_Phi()) {
2090             assert(u->bottom_type() == Type::MEMORY, "");
2091             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
2092               assert(controls.member(u->in(0)), "");
2093               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
2094               memories.push(u);
2095             }
2096           } else if (u->is_SafePoint() || u->is_MemBar()) {
2097             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2098               Node* uu = u->fast_out(j);
2099               if (uu->bottom_type() == Type::MEMORY) {
2100                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
2101                 memories.push(uu);
2102               }
2103             }
2104           }
2105         }
2106       }
2107       for (uint next2 = 0; next2 < controls.size(); next2++) {
2108         Node *m = controls.at(next2);
2109         if (m->is_Region()) {
2110           bool all_in = true;
2111           for (uint i = 1; i < m->req(); i++) {
2112             if (!controls.member(m->in(i))) {
2113               all_in = false;
2114               break;
2115             }
2116           }
2117           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
2118           bool found_phi = false;
2119           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2120             Node* u = m->fast_out(j);
2121             if (u->is_Phi() && memories.member(u)) {
2122               found_phi = true;
2123               for (uint i = 1; i < u->req() && found_phi; i++) {
2124                 Node* k = u->in(i);
2125                 if (memories.member(k) != controls.member(m->in(i))) {
2126                   found_phi = false;
2127                 }
2128               }
2129             }
2130           }
2131           assert(found_phi || all_in, "");
2132         }
2133       }
2134       controls.clear();
2135       memories.clear();
2136     }
2137     for (uint i = 0; i < n->len(); ++i) {
2138       Node *m = n->in(i);
2139       if (m != NULL) {
2140         nodes.push(m);
2141       }
2142     }
2143   }
2144 }
2145 #endif
2146 
2147 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2148   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2149 }
2150 
2151 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2152   if (in(1) == NULL || in(1)->is_top()) {
2153     return Type::TOP;
2154   }
2155   const Type* t = in(1)->bottom_type();
2156   if (t == TypePtr::NULL_PTR) {
2157     return t;
2158   }
2159   return t->is_oopptr();
2160 }
2161 
2162 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2163   if (in(1) == NULL) {
2164     return Type::TOP;
2165   }
2166   const Type* t = phase->type(in(1));
2167   if (t == Type::TOP) {
2168     return Type::TOP;
2169   }
2170   if (t == TypePtr::NULL_PTR) {
2171     return t;
2172   }
2173   return t->is_oopptr();
2174 }
2175 
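     // Decide whether a pre-barrier is needed for value n: not needed for
     // freshly allocated objects, NULL and constant oops, or a value that
     // already went through an enqueue barrier; maybe needed for Phi/CMove,
     // depending on their inputs; needed otherwise.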
2176 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2177   if (n == NULL ||
2178       n->is_Allocate() ||
2179       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2180       n->bottom_type() == TypePtr::NULL_PTR ||
2181       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2182     return NotNeeded;
2183   }
2184   if (n->is_Phi() ||
2185       n->is_CMove()) {
2186     return MaybeNeeded;
2187   }
2188   return Needed;
2189 }
2190 
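     // Step through casts, DecodeN/EncodeP and projections to the node that
     // actually produces the value.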
2191 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2192   for (;;) {
2193     if (n == NULL) {
2194       return n;
2195     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2196       return n;
2197     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2198       return n;
2199     } else if (n->is_ConstraintCast() ||
2200                n->Opcode() == Op_DecodeN ||
2201                n->Opcode() == Op_EncodeP) {
2202       n = n->in(1);
2203     } else if (n->is_Proj()) {
2204       n = n->in(0);
2205     } else {
2206       return n;
2207     }
2208   }
2209   ShouldNotReachHere();
2210   return NULL;
2211 }
2212 
2213 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2214   PhaseIterGVN* igvn = phase->is_IterGVN();
2215 
2216   Node* n = next(in(1));
2217 
2218   int cont = needed(n);
2219 
2220   if (cont == NotNeeded) {
2221     return in(1);
2222   } else if (cont == MaybeNeeded) {
2223     if (igvn == NULL) {
2224       phase->record_for_igvn(this);
2225       return this;
2226     } else {
2227       ResourceMark rm;
2228       Unique_Node_List wq;
2229       uint wq_i = 0;
2230 
2231       for (;;) {
2232         if (n->is_Phi()) {
2233           for (uint i = 1; i < n->req(); i++) {
2234             Node* m = n->in(i);
2235             if (m != NULL) {
2236               wq.push(m);
2237             }
2238           }
2239         } else {
2240           assert(n->is_CMove(), "nothing else here");
2241           Node* m = n->in(CMoveNode::IfFalse);
2242           wq.push(m);
2243           m = n->in(CMoveNode::IfTrue);
2244           wq.push(m);
2245         }
2246         Node* orig_n = NULL;
2247         do {
2248           if (wq_i >= wq.size()) {
2249             return in(1);
2250           }
2251           n = wq.at(wq_i);
2252           wq_i++;
2253           orig_n = n;
2254           n = next(n);
2255           cont = needed(n);
2256           if (cont == Needed) {
2257             return this;
2258           }
2259         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2260       }
2261     }
2262   }
2263 
2264   return this;
2265 }
2266 
2267 #ifdef ASSERT
2268 static bool has_never_branch(Node* root) {
2269   for (uint i = 1; i < root->req(); i++) {
2270     Node* in = root->in(i);
2271     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2272       return true;
2273     }
2274   }
2275   return false;
2276 }
2277 #endif
2278 
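     // Build a map from CFG node to the memory state for this fixer's alias:
     // first walk the memory graph recording the memory node defined in each
     // block, then iterate over the CFG in rpo, propagating states to regions
     // and creating new memory phis where the incoming states differ.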
2279 void MemoryGraphFixer::collect_memory_nodes() {
2280   Node_Stack stack(0);
2281   VectorSet visited(Thread::current()->resource_area());
2282   Node_List regions;
2283 
2284   // Walk the raw memory graph and create a mapping from CFG node to
2285   // memory node. Exclude phis for now.
2286   stack.push(_phase->C->root(), 1);
2287   do {
2288     Node* n = stack.node();
2289     int opc = n->Opcode();
2290     uint i = stack.index();
2291     if (i < n->req()) {
2292       Node* mem = NULL;
2293       if (opc == Op_Root) {
2294         Node* in = n->in(i);
2295         int in_opc = in->Opcode();
2296         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2297           mem = in->in(TypeFunc::Memory);
2298         } else if (in_opc == Op_Halt) {
2299           if (in->in(0)->is_Region()) {
2300             Node* r = in->in(0);
2301             for (uint j = 1; j < r->req(); j++) {
2302               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2303             }
2304           } else {
2305             Node* proj = in->in(0);
2306             assert(proj->is_Proj(), "");
2307             Node* in = proj->in(0);
2308             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2309             if (in->is_CallStaticJava()) {
2310               mem = in->in(TypeFunc::Memory);
2311             } else if (in->Opcode() == Op_Catch) {
2312               Node* call = in->in(0)->in(0);
2313               assert(call->is_Call(), "");
2314               mem = call->in(TypeFunc::Memory);
2315             } else if (in->Opcode() == Op_NeverBranch) {
2316               Node* head = in->in(0);
2317               assert(head->is_Region(), "unexpected infinite loop graph shape");
2318 
2319               Node* phi_mem = NULL;
2320               for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2321                 Node* u = head->fast_out(j);
2322                 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2323                   if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2324                     assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2325                     phi_mem = u;
2326                   } else if (u->adr_type() == TypePtr::BOTTOM) {
2327                     assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2328                     if (phi_mem == NULL) {
2329                       phi_mem = u;
2330                     }
2331                   }
2332                 }
2333               }
2334               if (phi_mem == NULL) {
2335                 for (uint j = 1; j < head->req(); j++) {
2336                   Node* tail = head->in(j);
2337                   if (!_phase->is_dominator(head, tail)) {
2338                     continue;
2339                   }
2340                   Node* c = tail;
2341                   while (c != head) {
2342                     if (c->is_SafePoint() && !c->is_CallLeaf()) {
2343                       Node* m = c->in(TypeFunc::Memory);
2344                       if (m->is_MergeMem()) {
2345                         m = m->as_MergeMem()->memory_at(_alias);
2346                       }
2347                       assert(mem == NULL || mem == m, "several memory states");
2348                       mem = m;
2349                     }
2350                     c = _phase->idom(c);
2351                   }
2352                   assert(mem != NULL, "should have found safepoint");
2353                 }
2354                 assert(mem != NULL, "should have found safepoint");
2355               } else {
2356                 mem = phi_mem;
2357               }
2358             }
2359           }
2360         } else {
2361 #ifdef ASSERT
2362           n->dump();
2363           in->dump();
2364 #endif
2365           ShouldNotReachHere();
2366         }
2367       } else {
2368         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2369         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2370         mem = n->in(i);
2371       }
2372       i++;
2373       stack.set_index(i);
2374       if (mem == NULL) {
2375         continue;
2376       }
2377       for (;;) {
2378         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2379           break;
2380         }
2381         if (mem->is_Phi()) {
2382           stack.push(mem, 2);
2383           mem = mem->in(1);
2384         } else if (mem->is_Proj()) {
2385           stack.push(mem, mem->req());
2386           mem = mem->in(0);
2387         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2388           mem = mem->in(TypeFunc::Memory);
2389         } else if (mem->is_MergeMem()) {
2390           MergeMemNode* mm = mem->as_MergeMem();
2391           mem = mm->memory_at(_alias);
2392         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2393           assert(_alias == Compile::AliasIdxRaw, "");
2394           stack.push(mem, mem->req());
2395           mem = mem->in(MemNode::Memory);
2396         } else {
2397 #ifdef ASSERT
2398           mem->dump();
2399 #endif
2400           ShouldNotReachHere();
2401         }
2402       }
2403     } else {
2404       if (n->is_Phi()) {
2405         // Nothing: memory phis are handled by the rpo pass below.
2406       } else if (!n->is_Root()) {
2407         Node* c = get_ctrl(n);
2408         _memory_nodes.map(c->_idx, n);
2409       }
2410       stack.pop();
2411     }
2412   } while(stack.is_nonempty());
2413 
2414   // Iterate over CFG nodes in rpo and propagate memory state to
2415   // compute memory state at regions, creating new phis if needed.
2416   Node_List rpo_list;
2417   visited.Clear();
2418   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2419   Node* root = rpo_list.pop();
2420   assert(root == _phase->C->root(), "");
2421 
2422   const bool trace = false;
2423 #ifdef ASSERT
2424   if (trace) {
2425     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2426       Node* c = rpo_list.at(i);
2427       if (_memory_nodes[c->_idx] != NULL) {
2428         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2429       }
2430     }
2431   }
2432 #endif
2433   uint last = _phase->C->unique();
2434 
2435 #ifdef ASSERT
2436   uint8_t max_depth = 0;
2437   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2438     IdealLoopTree* lpt = iter.current();
2439     max_depth = MAX2(max_depth, lpt->_nest);
2440   }
2441 #endif
2442 
2443   bool progress = true;
2444   int iteration = 0;
2445   Node_List dead_phis;
2446   while (progress) {
2447     progress = false;
2448     iteration++;
2449     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2450     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2451     IdealLoopTree* last_updated_ilt = NULL;
2452     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2453       Node* c = rpo_list.at(i);
2454 
2455       Node* prev_mem = _memory_nodes[c->_idx];
2456       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2457         Node* prev_region = regions[c->_idx];
2458         Node* unique = NULL;
2459         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2460           Node* m = _memory_nodes[c->in(j)->_idx];
2461           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2462           if (m != NULL) {
2463             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2464               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2465               // continue
2466             } else if (unique == NULL) {
2467               unique = m;
2468             } else if (m == unique) {
2469               // continue
2470             } else {
2471               unique = NodeSentinel;
2472             }
2473           }
2474         }
2475         assert(unique != NULL, "empty phi???");
2476         if (unique != NodeSentinel) {
2477           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2478             dead_phis.push(prev_region);
2479           }
2480           regions.map(c->_idx, unique);
2481         } else {
2482           Node* phi = NULL;
2483           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2484             phi = prev_region;
2485             for (uint k = 1; k < c->req(); k++) {
2486               Node* m = _memory_nodes[c->in(k)->_idx];
2487               assert(m != NULL, "expect memory state");
2488               phi->set_req(k, m);
2489             }
2490           } else {
2491             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2492               Node* u = c->fast_out(j);
2493               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2494                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2495                 phi = u;
2496                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2497                   Node* m = _memory_nodes[c->in(k)->_idx];
2498                   assert(m != NULL, "expect memory state");
2499                   if (u->in(k) != m) {
2500                     phi = NULL;
2501                   }
2502                 }
2503               }
2504             }
2505             if (phi == NULL) {
2506               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2507               for (uint k = 1; k < c->req(); k++) {
2508                 Node* m = _memory_nodes[c->in(k)->_idx];
2509                 assert(m != NULL, "expect memory state");
2510                 phi->init_req(k, m);
2511               }
2512             }
2513           }
2514           assert(phi != NULL, "");
2515           regions.map(c->_idx, phi);
2516         }
2517         Node* current_region = regions[c->_idx];
2518         if (current_region != prev_region) {
2519           progress = true;
2520           if (prev_region == prev_mem) {
2521             _memory_nodes.map(c->_idx, current_region);
2522           }
2523         }
2524       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2525         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2526         assert(m != NULL, "expect memory state");
2527         if (m != prev_mem) {
2528           _memory_nodes.map(c->_idx, m);
2529           progress = true;
2530         }
2531       }
2532 #ifdef ASSERT
2533       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2534 #endif
2535     }
2536   }
2537 
2538   // Replace existing phi with computed memory state for that region
2539   // if different (could be a new phi or a dominating memory node if
2540   // that phi was found to be useless).
2541   while (dead_phis.size() > 0) {
2542     Node* n = dead_phis.pop();
2543     n->replace_by(_phase->C->top());
2544     n->destruct();
2545   }
2546   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2547     Node* c = rpo_list.at(i);
2548     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2549       Node* n = regions[c->_idx];
2550       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2551         _phase->register_new_node(n, c);
2552       }
2553     }
2554   }
2555   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2556     Node* c = rpo_list.at(i);
2557     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2558       Node* n = regions[c->_idx];
2559       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2560         Node* u = c->fast_out(i);
2561         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2562             u != n) {
2563           if (u->adr_type() == TypePtr::BOTTOM) {
2564             fix_memory_uses(u, n, n, c);
2565           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2566             _phase->lazy_replace(u, n);
2567             --i; --imax;
2568           }
2569         }
2570       }
2571     }
2572   }
2573 }
2574 
2575 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2576   Node* c = _phase->get_ctrl(n);
2577   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2578     assert(c == n->in(0), "");
2579     CallNode* call = c->as_Call();
2580     CallProjections projs;
2581     call->extract_projections(&projs, true, false);
2582     if (projs.catchall_memproj != NULL) {
2583       if (projs.fallthrough_memproj == n) {
2584         c = projs.fallthrough_catchproj;
2585       } else {
2586         assert(projs.catchall_memproj == n, "");
2587         c = projs.catchall_catchproj;
2588       }
2589     }
2590   }
2591   return c;
2592 }
2593 
2594 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2595   if (_phase->has_ctrl(n))
2596     return get_ctrl(n);
2597   else {
2598     assert (n->is_CFG(), "must be a CFG node");
2599     return n;
2600   }
2601 }
2602 
2603 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2604   return m != NULL && get_ctrl(m) == c;
2605 }
2606 
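     // Return the memory state for this alias that is live at ctrl: walk up
     // the dominator tree from ctrl until a recorded memory state valid for
     // that block is found. When a node n is supplied, also step over memory
     // nodes at ctrl that do not dominate n.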
2607 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2608   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2609   Node* mem = _memory_nodes[ctrl->_idx];
2610   Node* c = ctrl;
2611   while (!mem_is_valid(mem, c) &&
2612          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2613     c = _phase->idom(c);
2614     mem = _memory_nodes[c->_idx];
2615   }
2616   if (n != NULL && mem_is_valid(mem, c)) {
2617     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2618       mem = next_mem(mem, _alias);
2619     }
2620     if (mem->is_MergeMem()) {
2621       mem = mem->as_MergeMem()->memory_at(_alias);
2622     }
2623     if (!mem_is_valid(mem, c)) {
2624       do {
2625         c = _phase->idom(c);
2626         mem = _memory_nodes[c->_idx];
2627       } while (!mem_is_valid(mem, c) &&
2628                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2629     }
2630   }
2631   assert(mem->bottom_type() == Type::MEMORY, "");
2632   return mem;
2633 }
2634 
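// Returns true if the region already merges memory for this alias with a Phi.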
2635 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2636   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2637     Node* use = region->fast_out(i);
2638     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2639         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2640       return true;
2641     }
2642   }
2643   return false;
2644 }
2645 
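// Rewire the memory graph after new_mem was introduced at new_ctrl: memory
// uses of mem on this alias slice are redirected to the memory state that is
// valid at their control, and new memory Phis are created at regions where
// different states merge.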
2646 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2647   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2648   const bool trace = false;
2649   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2650   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2651   GrowableArray<Node*> phis;
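  // If mem_for_ctrl sits downstream of mem on the raw memory slice, walk the
  // chain of stores back up to mem and make the node that consumed mem
  // consume new_mem instead.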
2652   if (mem_for_ctrl != mem) {
2653     Node* old = mem_for_ctrl;
2654     Node* prev = NULL;
2655     while (old != mem) {
2656       prev = old;
2657       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2658         assert(_alias == Compile::AliasIdxRaw, "");
2659         old = old->in(MemNode::Memory);
2660       } else if (old->Opcode() == Op_SCMemProj) {
2661         assert(_alias == Compile::AliasIdxRaw, "");
2662         old = old->in(0);
2663       } else {
2664         ShouldNotReachHere();
2665       }
2666     }
2667     assert(prev != NULL, "");
2668     if (new_ctrl != ctrl) {
2669       _memory_nodes.map(ctrl->_idx, mem);
2670       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2671     }
2672     uint input = (uint)MemNode::Memory;
2673     _phase->igvn().replace_input_of(prev, input, new_mem);
2674   } else {
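    // mem_for_ctrl == mem: propagate the new memory state from new_ctrl down
    // the CFG, creating memory Phis at regions that merge different states,
    // and fill in the Phi inputs once all regions have been visited.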
2675     uses.clear();
2676     _memory_nodes.map(new_ctrl->_idx, new_mem);
2677     uses.push(new_ctrl);
2678     for (uint next = 0; next < uses.size(); next++) {
2679       Node* n = uses.at(next);
2680       assert(n->is_CFG(), "");
2681       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2682       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2683         Node* u = n->fast_out(i);
2684         if (!u->is_Root() && u->is_CFG() && u != n) {
2685           Node* m = _memory_nodes[u->_idx];
2686           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2687               !has_mem_phi(u) &&
2688               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2689             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2690             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2691 
2692             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2693               bool push = true;
2694               bool create_phi = true;
2695               if (_phase->is_dominator(new_ctrl, u)) {
2696                 create_phi = false;
2697               }
2698               if (create_phi) {
2699                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2700                 _phase->register_new_node(phi, u);
2701                 phis.push(phi);
2702                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2703                 if (!mem_is_valid(m, u)) {
2704                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2705                   _memory_nodes.map(u->_idx, phi);
2706                 } else {
2707                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2708                   for (;;) {
2709                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2710                     Node* next = NULL;
2711                     if (m->is_Proj()) {
2712                       next = m->in(0);
2713                     } else {
2714                       assert(m->is_Mem() || m->is_LoadStore(), "");
2715                       assert(_alias == Compile::AliasIdxRaw, "");
2716                       next = m->in(MemNode::Memory);
2717                     }
2718                     if (_phase->get_ctrl(next) != u) {
2719                       break;
2720                     }
2721                     if (next->is_MergeMem()) {
2722                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2723                       break;
2724                     }
2725                     if (next->is_Phi()) {
2726                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2727                       break;
2728                     }
2729                     m = next;
2730                   }
2731 
2732                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2733                   assert(m->is_Mem() || m->is_LoadStore(), "");
2734                   uint input = (uint)MemNode::Memory;
2735                   _phase->igvn().replace_input_of(m, input, phi);
2736                   push = false;
2737                 }
2738               } else {
2739                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2740               }
2741               if (push) {
2742                 uses.push(u);
2743               }
2744             }
2745           } else if (!mem_is_valid(m, u) &&
2746                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2747             uses.push(u);
2748           }
2749         }
2750       }
2751     }
2752     for (int i = 0; i < phis.length(); i++) {
2753       Node* n = phis.at(i);
2754       Node* r = n->in(0);
2755       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2756       for (uint j = 1; j < n->req(); j++) {
2757         Node* m = find_mem(r->in(j), NULL);
2758         _phase->igvn().replace_input_of(n, j, m);
2759         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2760       }
2761     }
2762   }
2763   uint last = _phase->C->unique();
2764   MergeMemNode* mm = NULL;
2765   int alias = _alias;
2766   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2767   // Process loads first so we do not miss an anti-dependency: if the
2768   // memory edge of a store is updated before a load is processed, the
2769   // anti-dependency between them may be missed.
2770   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2771     Node* u = mem->out(i);
2772     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2773       Node* m = find_mem(_phase->get_ctrl(u), u);
2774       if (m != mem) {
2775         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2776         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2777         --i;
2778       }
2779     }
2780   }
2781   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2782     Node* u = mem->out(i);
2783     if (u->_idx < last) {
2784       if (u->is_Mem()) {
2785         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2786           Node* m = find_mem(_phase->get_ctrl(u), u);
2787           if (m != mem) {
2788             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2789             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2790             --i;
2791           }
2792         }
2793       } else if (u->is_MergeMem()) {
2794         MergeMemNode* u_mm = u->as_MergeMem();
2795         if (u_mm->memory_at(alias) == mem) {
2796           MergeMemNode* newmm = NULL;
2797           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2798             Node* uu = u->fast_out(j);
2799             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2800             if (uu->is_Phi()) {
2801               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2802               Node* region = uu->in(0);
2803               int nb = 0;
2804               for (uint k = 1; k < uu->req(); k++) {
2805                 if (uu->in(k) == u) {
2806                   Node* m = find_mem(region->in(k), NULL);
2807                   if (m != mem) {
2808                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2809                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2810                     if (newmm != u) {
2811                       _phase->igvn().replace_input_of(uu, k, newmm);
2812                       nb++;
2813                       --jmax;
2814                     }
2815                   }
2816                 }
2817               }
2818               if (nb > 0) {
2819                 --j;
2820               }
2821             } else {
2822               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2823               if (m != mem) {
2824                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2825                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2826                 if (newmm != u) {
2827                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2828                   --j, --jmax;
2829                 }
2830               }
2831             }
2832           }
2833         }
2834       } else if (u->is_Phi()) {
2835         assert(u->bottom_type() == Type::MEMORY, "what else?");
2836         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2837           Node* region = u->in(0);
2838           bool replaced = false;
2839           for (uint j = 1; j < u->req(); j++) {
2840             if (u->in(j) == mem) {
2841               Node* m = find_mem(region->in(j), NULL);
2842               Node* nnew = m;
2843               if (m != mem) {
2844                 if (u->adr_type() == TypePtr::BOTTOM) {
2845                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2846                   nnew = mm;
2847                 }
2848                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2849                 _phase->igvn().replace_input_of(u, j, nnew);
2850                 replaced = true;
2851               }
2852             }
2853           }
2854           if (replaced) {
2855             --i;
2856           }
2857         }
2858       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2859                  u->adr_type() == NULL) {
2860         assert(u->adr_type() != NULL ||
2861                u->Opcode() == Op_Rethrow ||
2862                u->Opcode() == Op_Return ||
2863                u->Opcode() == Op_SafePoint ||
2864                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2865                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2866                u->Opcode() == Op_CallLeaf, "");
2867         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2868         if (m != mem) {
2869           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2870           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2871           --i;
2872         }
2873       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2874         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2875         if (m != mem) {
2876           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2877           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2878           --i;
2879         }
2880       } else if (u->adr_type() != TypePtr::BOTTOM &&
2881                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2882         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2883         assert(m != mem, "");
2884         // u is on the wrong slice...
2885         assert(u->is_ClearArray(), "");
2886         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2887         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2888         --i;
2889       }
2890     }
2891   }
2892 #ifdef ASSERT
2893   assert(new_mem->outcnt() > 0, "");
2894   for (int i = 0; i < phis.length(); i++) {
2895     Node* n = phis.at(i);
2896     assert(n->outcnt() > 0, "new phi must have uses now");
2897   }
2898 #endif
2899 }
2900 
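// Build a MergeMem based on mem with the slice for _alias replaced by
// rep_proj, registered at rep_ctrl.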
2901 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2902   MergeMemNode* mm = MergeMemNode::make(mem);
2903   mm->set_memory_at(_alias, rep_proj);
2904   _phase->register_new_node(mm, rep_ctrl);
2905   return mm;
2906 }
2907 
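// Produce a MergeMem equivalent to u but with rep_proj on the _alias slice.
// When u has a single use it is updated in place; otherwise a copy is built
// edge by edge, see the comment below about the DUIterator.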
2908 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2909   MergeMemNode* newmm = NULL;
2910   MergeMemNode* u_mm = u->as_MergeMem();
2911   Node* c = _phase->get_ctrl(u);
2912   if (_phase->is_dominator(c, rep_ctrl)) {
2913     c = rep_ctrl;
2914   } else {
2915     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2916   }
2917   if (u->outcnt() == 1) {
2918     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2919       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2920       --i;
2921     } else {
2922       _phase->igvn().rehash_node_delayed(u);
2923       u_mm->set_memory_at(_alias, rep_proj);
2924     }
2925     newmm = u_mm;
2926     _phase->set_ctrl_and_loop(u, c);
2927   } else {
2928     // We can't simply clone u and then change one of its inputs: that
2929     // would add and then remove an edge, which messes with the caller's
2930     // DUIterator over mem's uses.
2931     newmm = MergeMemNode::make(u_mm->base_memory());
2932     for (uint j = 0; j < u->req(); j++) {
2933       if (j < newmm->req()) {
2934         if (j == (uint)_alias) {
2935           newmm->set_req(j, rep_proj);
2936         } else if (newmm->in(j) != u->in(j)) {
2937           newmm->set_req(j, u->in(j));
2938         }
2939       } else if (j == (uint)_alias) {
2940         newmm->add_req(rep_proj);
2941       } else {
2942         newmm->add_req(u->in(j));
2943       }
2944     }
2945     if ((uint)_alias >= u->req()) {
2946       newmm->set_memory_at(_alias, rep_proj);
2947     }
2948     _phase->register_new_node(newmm, c);
2949   }
2950   return newmm;
2951 }
2952 
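// A memory Phi is rewritten for this alias if it is a Phi on that exact
// slice, or a bottom (wide) memory Phi whose region has no dedicated Phi
// for the slice: in that case the wide Phi carries the slice's state.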
2953 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2954   if (phi->adr_type() == TypePtr::BOTTOM) {
2955     Node* region = phi->in(0);
2956     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2957       Node* uu = region->fast_out(j);
2958       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2959         return false;
2960       }
2961     }
2962     return true;
2963   }
2964   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2965 }
2966 
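// Redirect uses of mem on this alias slice to rep_proj, but only for uses
// that are dominated by rep_ctrl. Wide memory users (MergeMems, bottom
// memory Phis, calls, returns, safepoints) get a MergeMem with only the
// _alias slice replaced.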
2967 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2968   uint last = _phase->C->unique();
2969   MergeMemNode* mm = NULL;
2970   assert(mem->bottom_type() == Type::MEMORY, "");
2971   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2972     Node* u = mem->out(i);
2973     if (u != replacement && u->_idx < last) {
2974       if (u->is_MergeMem()) {
2975         MergeMemNode* u_mm = u->as_MergeMem();
2976         if (u_mm->memory_at(_alias) == mem) {
2977           MergeMemNode* newmm = NULL;
2978           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2979             Node* uu = u->fast_out(j);
2980             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2981             if (uu->is_Phi()) {
2982               if (should_process_phi(uu)) {
2983                 Node* region = uu->in(0);
2984                 int nb = 0;
2985                 for (uint k = 1; k < uu->req(); k++) {
2986                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2987                     if (newmm == NULL) {
2988                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2989                     }
2990                     if (newmm != u) {
2991                       _phase->igvn().replace_input_of(uu, k, newmm);
2992                       nb++;
2993                       --jmax;
2994                     }
2995                   }
2996                 }
2997                 if (nb > 0) {
2998                   --j;
2999                 }
3000               }
3001             } else {
3002               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
3003                 if (newmm == NULL) {
3004                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
3005                 }
3006                 if (newmm != u) {
3007                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
3008                   --j, --jmax;
3009                 }
3010               }
3011             }
3012           }
3013         }
3014       } else if (u->is_Phi()) {
3015         assert(u->bottom_type() == Type::MEMORY, "what else?");
3016         Node* region = u->in(0);
3017         if (should_process_phi(u)) {
3018           bool replaced = false;
3019           for (uint j = 1; j < u->req(); j++) {
3020             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
3021               Node* nnew = rep_proj;
3022               if (u->adr_type() == TypePtr::BOTTOM) {
3023                 if (mm == NULL) {
3024                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3025                 }
3026                 nnew = mm;
3027               }
3028               _phase->igvn().replace_input_of(u, j, nnew);
3029               replaced = true;
3030             }
3031           }
3032           if (replaced) {
3033             --i;
3034           }
3035 
3036         }
3037       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
3038                  u->adr_type() == NULL) {
3039         assert(u->adr_type() != NULL ||
3040                u->Opcode() == Op_Rethrow ||
3041                u->Opcode() == Op_Return ||
3042                u->Opcode() == Op_SafePoint ||
3043                u->Opcode() == Op_StoreIConditional ||
3044                u->Opcode() == Op_StoreLConditional ||
3045                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
3046                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
3047                u->Opcode() == Op_CallLeaf, "%s", u->Name());
3048         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3049           if (mm == NULL) {
3050             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
3051           }
3052           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
3053           --i;
3054         }
3055       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
3056         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
3057           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
3058           --i;
3059         }
3060       }
3061     }
3062   }
3063 }
3064 
3065 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
3066 : Node(ctrl, obj) {
3067   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3068 }
3069 
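// The type of the barrier is the type of its value input, narrowed to an
// oop pointer; TOP and the null pointer pass through unchanged.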
3070 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3071   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3072     return Type::TOP;
3073   }
3074   const Type* t = in(ValueIn)->bottom_type();
3075   if (t == TypePtr::NULL_PTR) {
3076     return t;
3077   }
3078   return t->is_oopptr();
3079 }
3080 
3081 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3082   // Input is TOP ==> the result is TOP
3083   const Type *t2 = phase->type(in(ValueIn));
3084   if (t2 == Type::TOP) return Type::TOP;
3085 
3086   if (t2 == TypePtr::NULL_PTR) {
3087     return t2;
3088   }
3089 
3090   const Type* type = t2->is_oopptr();
3091   return type;
3092 }
3093 
3094 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3095   Node* value = in(ValueIn);
3096   if (!needs_barrier(phase, value)) {
3097     return value;
3098   }
3099   return this;
3100 }
3101 
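// Conservative test of whether the value may still need a load reference
// barrier: values known to be safe (fresh allocations, call results,
// constants, null, incoming arguments, other barriers) do not, anything
// unrecognized is assumed to.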
3102 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3103   Unique_Node_List visited;
3104   return needs_barrier_impl(phase, n, visited);
3105 }
3106 
3107 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3108   if (n == NULL) return false;
3109   if (visited.member(n)) {
3110     return false; // Been there.
3111   }
3112   visited.push(n);
3113 
3114   if (n->is_Allocate()) {
3115     // tty->print_cr("optimize barrier on alloc");
3116     return false;
3117   }
3118   if (n->is_Call()) {
3119     // tty->print_cr("optimize barrier on call");
3120     return false;
3121   }
3122 
3123   const Type* type = phase->type(n);
3124   if (type == Type::TOP) {
3125     return false;
3126   }
3127   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3128     // tty->print_cr("optimize barrier on null");
3129     return false;
3130   }
3131   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3132     // tty->print_cr("optimize barrier on constant");
3133     return false;
3134   }
3135 
3136   switch (n->Opcode()) {
3137     case Op_AddP:
3138       return true; // TODO: Can refine?
3139     case Op_LoadP:
3140     case Op_ShenandoahCompareAndExchangeN:
3141     case Op_ShenandoahCompareAndExchangeP:
3142     case Op_CompareAndExchangeN:
3143     case Op_CompareAndExchangeP:
3144     case Op_GetAndSetN:
3145     case Op_GetAndSetP:
3146       return true;
3147     case Op_Phi: {
3148       for (uint i = 1; i < n->req(); i++) {
3149         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3150       }
3151       return false;
3152     }
3153     case Op_CheckCastPP:
3154     case Op_CastPP:
3155       return needs_barrier_impl(phase, n->in(1), visited);
3156     case Op_Proj:
3157       return needs_barrier_impl(phase, n->in(0), visited);
3158     case Op_ShenandoahLoadReferenceBarrier:
3159       // tty->print_cr("optimize barrier on barrier");
3160       return false;
3161     case Op_Parm:
3162       // tty->print_cr("optimize barrier on input arg");
3163       return false;
3164     case Op_DecodeN:
3165     case Op_EncodeP:
3166       return needs_barrier_impl(phase, n->in(1), visited);
3167     case Op_LoadN:
3168       return true;
3169     case Op_CMoveN:
3170     case Op_CMoveP:
3171       return needs_barrier_impl(phase, n->in(2), visited) ||
3172              needs_barrier_impl(phase, n->in(3), visited);
3173     case Op_ShenandoahEnqueueBarrier:
3174       return needs_barrier_impl(phase, n->in(1), visited);
3175     case Op_CreateEx:
3176       return false;
3177     default:
3178       break;
3179   }
3180 #ifdef ASSERT
3181   tty->print("need barrier on?: ");
3182   tty->print_cr("ins:");
3183   n->dump(2);
3184   tty->print_cr("outs:");
3185   n->dump(-2);
3186   ShouldNotReachHere();
3187 #endif
3188   return true;
3189 }
3190 
3191 bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
3192   Unique_Node_List visited;
3193   Node_Stack stack(0);
3194   stack.push(this, 0);
3195 
3196   // Check if the barrier is actually useful: go over its uses looking for ones
3197   // that require a barrier (e.g. memory accesses). Stop once we detect a required
3198   // use; otherwise walk until we run out of nodes and declare the barrier redundant.
3199   while (stack.size() > 0) {
3200     Node* n = stack.node();
3201     if (visited.member(n)) {
3202       stack.pop();
3203       continue;
3204     }
3205     visited.push(n);
3206     bool visit_users = false;
3207     switch (n->Opcode()) {
3208       case Op_CallStaticJava:
3209       case Op_CallDynamicJava:
3210       case Op_CallLeaf:
3211       case Op_CallLeafNoFP:
3212       case Op_CompareAndSwapL:
3213       case Op_CompareAndSwapI:
3214       case Op_CompareAndSwapB:
3215       case Op_CompareAndSwapS:
3216       case Op_CompareAndSwapN:
3217       case Op_CompareAndSwapP:
3218       case Op_CompareAndExchangeL:
3219       case Op_CompareAndExchangeI:
3220       case Op_CompareAndExchangeB:
3221       case Op_CompareAndExchangeS:
3222       case Op_CompareAndExchangeN:
3223       case Op_CompareAndExchangeP:
3224       case Op_WeakCompareAndSwapL:
3225       case Op_WeakCompareAndSwapI:
3226       case Op_WeakCompareAndSwapB:
3227       case Op_WeakCompareAndSwapS:
3228       case Op_WeakCompareAndSwapN:
3229       case Op_WeakCompareAndSwapP:
3230       case Op_ShenandoahCompareAndSwapN:
3231       case Op_ShenandoahCompareAndSwapP:
3232       case Op_ShenandoahWeakCompareAndSwapN:
3233       case Op_ShenandoahWeakCompareAndSwapP:
3234       case Op_ShenandoahCompareAndExchangeN:
3235       case Op_ShenandoahCompareAndExchangeP:
3236       case Op_GetAndSetL:
3237       case Op_GetAndSetI:
3238       case Op_GetAndSetB:
3239       case Op_GetAndSetS:
3240       case Op_GetAndSetP:
3241       case Op_GetAndSetN:
3242       case Op_GetAndAddL:
3243       case Op_GetAndAddI:
3244       case Op_GetAndAddB:
3245       case Op_GetAndAddS:
3246       case Op_ShenandoahEnqueueBarrier:
3247       case Op_FastLock:
3248       case Op_FastUnlock:
3249       case Op_Rethrow:
3250       case Op_Return:
3251       case Op_StoreB:
3252       case Op_StoreC:
3253       case Op_StoreD:
3254       case Op_StoreF:
3255       case Op_StoreL:
3256       case Op_StoreLConditional:
3257       case Op_StoreI:
3258       case Op_StoreIConditional:
3259       case Op_StoreN:
3260       case Op_StoreP:
3261       case Op_StoreVector:
3262       case Op_StrInflatedCopy:
3263       case Op_StrCompressedCopy:
3264       case Op_EncodeP:
3265       case Op_CastP2X:
3266       case Op_SafePoint:
3267       case Op_EncodeISOArray:
3268       case Op_AryEq:
3269       case Op_StrEquals:
3270       case Op_StrComp:
3271       case Op_StrIndexOf:
3272       case Op_StrIndexOfChar:
3273       case Op_HasNegatives:
3274         // Known to require barriers
3275         return false;
3276       case Op_CmpP: {
3277         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3278             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3279           // One of the sides is known null, no need for barrier.
3280         } else {
3281           return false;
3282         }
3283         break;
3284       }
3285       case Op_LoadB:
3286       case Op_LoadUB:
3287       case Op_LoadUS:
3288       case Op_LoadD:
3289       case Op_LoadF:
3290       case Op_LoadL:
3291       case Op_LoadI:
3292       case Op_LoadS:
3293       case Op_LoadN:
3294       case Op_LoadP:
3295       case Op_LoadVector: {
3296         const TypePtr* adr_type = n->adr_type();
3297         int alias_idx = Compile::current()->get_alias_index(adr_type);
3298         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3299         ciField* field = alias_type->field();
3300         bool is_static = field != NULL && field->is_static();
3301         bool is_final = field != NULL && field->is_final();
3302 
3303         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3304           // Loading the constant does not require barriers: it should be handled
3305           // as part of GC roots already.
3306         } else {
3307           return false;
3308         }
3309         break;
3310       }
3311       case Op_Conv2B:
3312       case Op_LoadRange:
3313       case Op_LoadKlass:
3314       case Op_LoadNKlass:
3315         // Do not require barriers
3316         break;
3317       case Op_AddP:
3318       case Op_CheckCastPP:
3319       case Op_CastPP:
3320       case Op_CMoveP:
3321       case Op_Phi:
3322       case Op_ShenandoahLoadReferenceBarrier:
3323         // Whether or not these need barriers depends on their users
3324         visit_users = true;
3325         break;
3326       default: {
3327 #ifdef ASSERT
3328         fatal("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]);
3329 #else
3330         // Default to keeping excess barriers, rather than missing some.
3331         return false;
3332 #endif
3333       }
3334     }
3335 
3336     stack.pop();
3337     if (visit_users) {
3338       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3339         Node* user = n->fast_out(i);
3340         if (user != NULL) {
3341           stack.push(user, 0);
3342         }
3343       }
3344     }
3345   }
3346 
3347   // No use that requires the barrier was found: the barrier is redundant.
3348   return true;
3349 }
3350 
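// Recognize a value that is a CastPP pinned under an explicit null check
// that traps on null. The uncommon trap call is returned so that the caller
// can reuse the existing null check during barrier expansion.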
3351 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3352   Node* val = in(ValueIn);
3353 
3354   const Type* val_t = igvn.type(val);
3355 
3356   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3357       val->Opcode() == Op_CastPP &&
3358       val->in(0) != NULL &&
3359       val->in(0)->Opcode() == Op_IfTrue &&
3360       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3361       val->in(0)->in(0)->is_If() &&
3362       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3363       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3364       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3365       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3366       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3367     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3368     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3369     return unc;
3370   }
3371   return NULL;
3372 }