1 /*
   2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  27 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahForwarding.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  32 #include "gc/shenandoah/shenandoahRuntime.hpp"
  33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/block.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/castnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/subnode.hpp"
  43 
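// Expand all enqueue barriers and load-reference barriers recorded by the
// barrier set into explicit control flow. Expansion runs as a dedicated
// loop-opts pass (LoopOptsShenandoahExpand); if ShenandoahLoopOptsAfterExpansion
// is set, another round of loop optimizations is run over the expanded shape.
// Returns false if the compilation failed along the way.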
  44 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  45   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  46   if ((state->enqueue_barriers_count() +
  47        state->load_reference_barriers_count()) > 0) {
  48     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  49     C->clear_major_progress();
  50     PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
  51     if (C->failing()) return false;
  52     PhaseIdealLoop::verify(igvn);
  53     DEBUG_ONLY(verify_raw_mem(C->root());)
  54     if (attempt_more_loopopts) {
  55       C->set_major_progress();
  56       int cnt = 0;
  57       if (!C->optimize_loops(cnt, igvn, LoopOptsShenandoahPostExpand)) {
  58         return false;
  59       }
  60       C->clear_major_progress();
  61     }
  62   }
  63   return true;
  64 }
  65 
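// Recognize the heap-state test shape emitted by the barriers, roughly:
//   if ((LoadB(ThreadLocal + gc_state_offset) & mask) != 0)
// i.e. an If over a CmpI-with-zero of the AndI between the thread-local
// gc-state byte and the given mask.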
  66 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
  67   if (!UseShenandoahGC) {
  68     return false;
  69   }
  70   assert(iff->is_If(), "bad input");
  71   if (iff->Opcode() != Op_If) {
  72     return false;
  73   }
  74   Node* bol = iff->in(1);
  75   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  76     return false;
  77   }
  78   Node* cmp = bol->in(1);
  79   if (cmp->Opcode() != Op_CmpI) {
  80     return false;
  81   }
  82   Node* in1 = cmp->in(1);
  83   Node* in2 = cmp->in(2);
  84   if (in2->find_int_con(-1) != 0) {
  85     return false;
  86   }
  87   if (in1->Opcode() != Op_AndI) {
  88     return false;
  89   }
  90   in2 = in1->in(2);
  91   if (in2->find_int_con(-1) != mask) {
  92     return false;
  93   }
  94   in1 = in1->in(1);
  95 
  96   return is_gc_state_load(in1);
  97 }
  98 
  99 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 100   return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 101 }
 102 
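// Is this node a LoadB/LoadUB of the thread-local gc-state byte, i.e. a load
// from ThreadLocal + ShenandoahThreadLocalData::gc_state_offset()?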
 103 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 104   if (!UseShenandoahGC) {
 105     return false;
 106   }
 107   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 108     return false;
 109   }
 110   Node* addp = n->in(MemNode::Address);
 111   if (!addp->is_AddP()) {
 112     return false;
 113   }
 114   Node* base = addp->in(AddPNode::Address);
 115   Node* off = addp->in(AddPNode::Offset);
 116   if (base->Opcode() != Op_ThreadLocal) {
 117     return false;
 118   }
 119   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 120     return false;
 121   }
 122   return true;
 123 }
 124 
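// Walk control up from start towards the dominating stop node and report
// whether any path crosses a safepoint (leaf calls don't count).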
 125 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 126   assert(phase->is_dominator(stop, start), "bad inputs");
 127   ResourceMark rm;
 128   Unique_Node_List wq;
 129   wq.push(start);
 130   for (uint next = 0; next < wq.size(); next++) {
 131     Node *m = wq.at(next);
 132     if (m == stop) {
 133       continue;
 134     }
 135     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 136       return true;
 137     }
 138     if (m->is_Region()) {
 139       for (uint i = 1; i < m->req(); i++) {
 140         wq.push(m->in(i));
 141       }
 142     } else {
 143       wq.push(m->in(0));
 144     }
 145   }
 146   return false;
 147 }
 148 
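// If another load of the gc-state byte dominates this one and no safepoint
// can be crossed in between (the gc state should only change at safepoints),
// the dominated load is redundant: replace it with the dominating one.
// Returns true if the load was commoned.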
 149 bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
 150   assert(is_gc_state_load(n), "inconsistent");
 151   Node* addp = n->in(MemNode::Address);
 152   Node* dominator = NULL;
 153   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 154     Node* u = addp->fast_out(i);
 155     assert(is_gc_state_load(u), "inconsistent");
 156     if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
 157       if (dominator == NULL) {
 158         dominator = u;
 159       } else {
 160         if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
 161           dominator = u;
 162         }
 163       }
 164     }
 165   }
 166   if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
 167     return false;
 168   }
 169   phase->igvn().replace_node(n, dominator);
 170 
 171   return true;
 172 }
 173 
 174 #ifdef ASSERT
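// Walk the def chain of 'in' (through casts, AddPs, phis and CMoves) and check
// that every path either ends at a value that needs no barrier (null, non-oop,
// constant, argument, fresh allocation, ...) or goes through a load-reference
// barrier; for oop stores an enqueue barrier is expected in the chain as well.
// Returns false if some path lacks the expected barrier.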
 175 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 176   assert(phis.size() == 0, "");
 177 
 178   while (true) {
 179     if (in->bottom_type() == TypePtr::NULL_PTR) {
 180       if (trace) {tty->print_cr("NULL");}
 181     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 182       if (trace) {tty->print_cr("Non oop");}
 183     } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) {
 184       if (trace) {tty->print_cr("Java mirror");}
 185     } else {
 186       if (in->is_ConstraintCast()) {
 187         in = in->in(1);
 188         continue;
 189       } else if (in->is_AddP()) {
 190         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 191         in = in->in(AddPNode::Address);
 192         continue;
 193       } else if (in->is_Con()) {
 194         if (trace) {
 195           tty->print("Found constant");
 196           in->dump();
 197         }
 198       } else if (in->Opcode() == Op_Parm) {
 199         if (trace) {
 200           tty->print("Found argument");
 201         }
 202       } else if (in->Opcode() == Op_CreateEx) {
 203         if (trace) {
 204           tty->print("Found create-exception");
 205         }
 206       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 207         if (trace) {
 208           tty->print("Found raw LoadP (OSR argument?)");
 209         }
 210       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 211         if (t == ShenandoahOopStore) {
 212           uint i = 0;
 213           for (; i < phis.size(); i++) {
 214             Node* n = phis.node_at(i);
 215             if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
 216               break;
 217             }
 218           }
 219           if (i == phis.size()) {
 220             return false;
 221           }
 222         }
 223         barriers_used.push(in);
 224         if (trace) {tty->print("Found barrier"); in->dump();}
 225       } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
 226         if (t != ShenandoahOopStore) {
 227           in = in->in(1);
 228           continue;
 229         }
 230         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 231         phis.push(in, in->req());
 232         in = in->in(1);
 233         continue;
 234       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 235         if (trace) {
 236           tty->print("Found alloc");
 237           in->in(0)->dump();
 238         }
 239       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 240         if (trace) {
 241           tty->print("Found Java call");
 242         }
 243       } else if (in->is_Phi()) {
 244         if (!visited.test_set(in->_idx)) {
 245           if (trace) {tty->print("Pushed phi:"); in->dump();}
 246           phis.push(in, 2);
 247           in = in->in(1);
 248           continue;
 249         }
 250         if (trace) {tty->print("Already seen phi:"); in->dump();}
 251       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 252         if (!visited.test_set(in->_idx)) {
 253           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 254           phis.push(in, CMoveNode::IfTrue);
 255           in = in->in(CMoveNode::IfFalse);
 256           continue;
 257         }
 258         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 259       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 260         in = in->in(1);
 261         continue;
 262       } else {
 263         return false;
 264       }
 265     }
 266     bool cont = false;
 267     while (phis.is_nonempty()) {
 268       uint idx = phis.index();
 269       Node* phi = phis.node();
 270       if (idx >= phi->req()) {
 271         if (trace) {tty->print("Popped phi:"); phi->dump();}
 272         phis.pop();
 273         continue;
 274       }
 275       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 276       in = phi->in(idx);
 277       phis.set_index(idx+1);
 278       cont = true;
 279       break;
 280     }
 281     if (!cont) {
 282       break;
 283     }
 284   }
 285   return true;
 286 }
 287 
 288 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 289   if (n1 != NULL) {
 290     n1->dump(+10);
 291   }
 292   if (n2 != NULL) {
 293     n2->dump(+10);
 294   }
 295   fatal("%s", msg);
 296 }
 297 
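// Debug-only graph verification: walk the graph from the root and check that
// oop-consuming nodes (loads, stores, compares, load-stores, array copies and
// known leaf-call intrinsics) see their oop inputs through the expected
// Shenandoah barriers, using verify_helper() above.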
 298 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 299   ResourceMark rm;
 300   Unique_Node_List wq;
 301   GrowableArray<Node*> barriers;
 302   Unique_Node_List barriers_used;
 303   Node_Stack phis(0);
 304   VectorSet visited(Thread::current()->resource_area());
 305   const bool trace = false;
 306   const bool verify_no_useless_barrier = false;
 307 
 308   wq.push(root);
 309   for (uint next = 0; next < wq.size(); next++) {
 310     Node *n = wq.at(next);
 311     if (n->is_Load()) {
 312       const bool trace = false;
 313       if (trace) {tty->print("Verifying"); n->dump();}
 314       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 315         if (trace) {tty->print_cr("Load range/klass");}
 316       } else {
 317         const TypePtr* adr_type = n->as_Load()->adr_type();
 318 
 319         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 320           if (trace) {tty->print_cr("Mark load");}
 321         } else if (adr_type->isa_instptr() &&
 322                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 323                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
 324           if (trace) {tty->print_cr("Reference.get()");}
 325         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 326           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 327         }
 328       }
 329     } else if (n->is_Store()) {
 330       const bool trace = false;
 331 
 332       if (trace) {tty->print("Verifying"); n->dump();}
 333       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 334         Node* adr = n->in(MemNode::Address);
 335         bool verify = true;
 336 
 337         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 338           adr = adr->in(AddPNode::Address);
 339           if (adr->is_AddP()) {
 340             assert(adr->in(AddPNode::Base)->is_top(), "");
 341             adr = adr->in(AddPNode::Address);
 342             if (adr->Opcode() == Op_LoadP &&
 343                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 344                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 345                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 346               if (trace) {tty->print_cr("SATB prebarrier");}
 347               verify = false;
 348             }
 349           }
 350         }
 351 
 352         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 353           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 354         }
 355       }
 356       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 357         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 358       }
 359     } else if (n->Opcode() == Op_CmpP) {
 360       const bool trace = false;
 361 
 362       Node* in1 = n->in(1);
 363       Node* in2 = n->in(2);
 364       if (in1->bottom_type()->isa_oopptr()) {
 365         if (trace) {tty->print("Verifying"); n->dump();}
 366 
 367         bool mark_inputs = false;
 368         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 369             (in1->is_Con() || in2->is_Con())) {
 370           if (trace) {tty->print_cr("Comparison against a constant");}
 371           mark_inputs = true;
 372         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 373                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 374           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 375           mark_inputs = true;
 376         } else {
 377           assert(in2->bottom_type()->isa_oopptr(), "");
 378 
 379           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 380               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 381             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 382           }
 383         }
 384         if (verify_no_useless_barrier &&
 385             mark_inputs &&
 386             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 387              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 388           phis.clear();
 389           visited.Reset();
 390         }
 391       }
 392     } else if (n->is_LoadStore()) {
 393       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 394           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 395         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 396       }
 397 
 398       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 399         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 400       }
 401     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 402       CallNode* call = n->as_Call();
 403 
 404       static struct {
 405         const char* name;
 406         struct {
 407           int pos;
 408           verify_type t;
 409         } args[6];
 410       } calls[] = {
 411         "aescrypt_encryptBlock",
 412         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 413           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 414         "aescrypt_decryptBlock",
 415         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 416           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 417         "multiplyToLen",
 418         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 419           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 420         "squareToLen",
 421         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 422           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 423         "montgomery_multiply",
 424         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 425           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 426         "montgomery_square",
 427         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 428           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 429         "mulAdd",
 430         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 431           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 432         "vectorizedMismatch",
 433         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 434           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 435         "updateBytesCRC32",
 436         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 437           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 438         "updateBytesAdler32",
 439         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 440           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 441         "updateBytesCRC32C",
 442         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 443           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 444         "counterMode_AESCrypt",
 445         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 446           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 447         "cipherBlockChaining_encryptAESCrypt",
 448         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 449           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 450         "cipherBlockChaining_decryptAESCrypt",
 451         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 452           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 453         "shenandoah_clone_barrier",
 454         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 455           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 456         "ghash_processBlocks",
 457         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 458           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 459         "sha1_implCompress",
 460         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 461           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 462         "sha256_implCompress",
 463         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 464           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 465         "sha512_implCompress",
 466         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 467           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 468         "sha1_implCompressMB",
 469         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 470           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 471         "sha256_implCompressMB",
 472         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 473           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 474         "sha512_implCompressMB",
 475         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 476           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 477         "encodeBlock",
 478         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 479           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 480       };
 481 
 482       if (call->is_call_to_arraycopystub()) {
 483         Node* dest = NULL;
 484         const TypeTuple* args = n->as_Call()->_tf->domain();
 485         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 486           if (args->field_at(i)->isa_ptr()) {
 487             j++;
 488             if (j == 2) {
 489               dest = n->in(i);
 490               break;
 491             }
 492           }
 493         }
 494         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 495             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 496           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 497         }
 498       } else if (strlen(call->_name) > 5 &&
 499                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 500         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 501           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 502         }
 503       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 504         // skip
 505       } else {
 506         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 507         int i = 0;
 508         for (; i < calls_len; i++) {
 509           if (!strcmp(calls[i].name, call->_name)) {
 510             break;
 511           }
 512         }
 513         if (i != calls_len) {
 514           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 515           for (uint j = 0; j < args_len; j++) {
 516             int pos = calls[i].args[j].pos;
 517             if (pos == -1) {
 518               break;
 519             }
 520             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 521               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 522             }
 523           }
 524           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 525             if (call->in(j)->bottom_type()->make_ptr() &&
 526                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 527               uint k = 0;
 528               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 529               if (k == args_len) {
 530                 fatal("arg %d for call %s not covered", j, call->_name);
 531               }
 532             }
 533           }
 534         } else {
 535           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 536             if (call->in(j)->bottom_type()->make_ptr() &&
 537                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 538               fatal("%s not covered", call->_name);
 539             }
 540           }
 541         }
 542       }
 543     } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 544       // skip
 545     } else if (n->is_AddP()
 546                || n->is_Phi()
 547                || n->is_ConstraintCast()
 548                || n->Opcode() == Op_Return
 549                || n->Opcode() == Op_CMoveP
 550                || n->Opcode() == Op_CMoveN
 551                || n->Opcode() == Op_Rethrow
 552                || n->is_MemBar()
 553                || n->Opcode() == Op_Conv2B
 554                || n->Opcode() == Op_SafePoint
 555                || n->is_CallJava()
 556                || n->Opcode() == Op_Unlock
 557                || n->Opcode() == Op_EncodeP
 558                || n->Opcode() == Op_DecodeN) {
 559       // nothing to do
 560     } else {
 561       static struct {
 562         int opcode;
 563         struct {
 564           int pos;
 565           verify_type t;
 566         } inputs[2];
 567       } others[] = {
 568         Op_FastLock,
 569         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 570         Op_Lock,
 571         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 572         Op_ArrayCopy,
 573         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 574         Op_StrCompressedCopy,
 575         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 576         Op_StrInflatedCopy,
 577         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 578         Op_AryEq,
 579         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 580         Op_StrIndexOf,
 581         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 582         Op_StrComp,
 583         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 584         Op_StrEquals,
 585         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 586         Op_EncodeISOArray,
 587         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 588         Op_HasNegatives,
 589         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 590         Op_CastP2X,
 591         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 592         Op_StrIndexOfChar,
 593         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 594       };
 595 
 596       const int others_len = sizeof(others) / sizeof(others[0]);
 597       int i = 0;
 598       for (; i < others_len; i++) {
 599         if (others[i].opcode == n->Opcode()) {
 600           break;
 601         }
 602       }
 603       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 604       if (i != others_len) {
 605         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 606         for (uint j = 0; j < inputs_len; j++) {
 607           int pos = others[i].inputs[j].pos;
 608           if (pos == -1) {
 609             break;
 610           }
 611           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 612             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 613           }
 614         }
 615         for (uint j = 1; j < stop; j++) {
 616           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 617               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 618             uint k = 0;
 619             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 620             if (k == inputs_len) {
 621               fatal("arg %d for node %s not covered", j, n->Name());
 622             }
 623           }
 624         }
 625       } else {
 626         for (uint j = 1; j < stop; j++) {
 627           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 628               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 629             fatal("%s not covered", n->Name());
 630           }
 631         }
 632       }
 633     }
 634 
 635     if (n->is_SafePoint()) {
 636       SafePointNode* sfpt = n->as_SafePoint();
 637       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 638         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 639           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 640             phis.clear();
 641             visited.Reset();
 642           }
 643         }
 644       }
    }

    // Keep walking the graph: queue all inputs so that the checks above are
    // applied to every node reachable from the root, not just the root itself.
    for (uint i = 0; i < n->len(); i++) {
      Node* m = n->in(i);
      if (m != NULL) {
        wq.push(m);
      }
    }
  }
 647 
 648   if (verify_no_useless_barrier) {
 649     for (int i = 0; i < barriers.length(); i++) {
 650       Node* n = barriers.at(i);
 651       if (!barriers_used.member(n)) {
 652         tty->print("XXX useless barrier"); n->dump(-2);
 653         ShouldNotReachHere();
 654       }
 655     }
 656   }
 657 }
 658 #endif
 659 
 660 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 661   // That both nodes have the same control is not sufficient to prove
 662   // domination, verify that there's no path from d to n
 663   ResourceMark rm;
 664   Unique_Node_List wq;
 665   wq.push(d);
 666   for (uint next = 0; next < wq.size(); next++) {
 667     Node *m = wq.at(next);
 668     if (m == n) {
 669       return false;
 670     }
 671     if (m->is_Phi() && m->in(0)->is_Loop()) {
 672       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 673     } else {
 674       for (uint i = 0; i < m->req(); i++) {
 675         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 676           wq.push(m->in(i));
 677         }
 678       }
 679     }
 680   }
 681   return true;
 682 }
 683 
 684 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 685   if (d_c != n_c) {
 686     return phase->is_dominator(d_c, n_c);
 687   }
 688   return is_dominator_same_ctrl(d_c, d, n, phase);
 689 }
 690 
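// Follow the memory graph one step up from mem for the given alias class:
// projections go to the node they project from, safepoints and membars to
// their memory input, phis to their entry input, merge mems to the slice for
// the alias class, and raw stores, load-stores and clear-arrays to their
// memory input.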
 691 Node* next_mem(Node* mem, int alias) {
 692   Node* res = NULL;
 693   if (mem->is_Proj()) {
 694     res = mem->in(0);
 695   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 696     res = mem->in(TypeFunc::Memory);
 697   } else if (mem->is_Phi()) {
 698     res = mem->in(1);
 699   } else if (mem->is_MergeMem()) {
 700     res = mem->as_MergeMem()->memory_at(alias);
 701   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 703     res = mem->in(MemNode::Memory);
 704   } else {
 705 #ifdef ASSERT
 706     mem->dump();
 707 #endif
 708     ShouldNotReachHere();
 709   }
 710   return res;
 711 }
 712 
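// Walk the idom chain from c up to dom and check that there is no unexpected
// control flow in between. Returns NULL if the path is branch free, the single
// extra If projection if allow_one_proj is set and exactly one is found
// (projections guarding uncommon traps are tolerated), or NodeSentinel if an
// unsupported branch is encountered.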
 713 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 714   Node* iffproj = NULL;
 715   while (c != dom) {
 716     Node* next = phase->idom(c);
 717     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 718     if (c->is_Region()) {
 719       ResourceMark rm;
 720       Unique_Node_List wq;
 721       wq.push(c);
 722       for (uint i = 0; i < wq.size(); i++) {
 723         Node *n = wq.at(i);
 724         if (n == next) {
 725           continue;
 726         }
 727         if (n->is_Region()) {
 728           for (uint j = 1; j < n->req(); j++) {
 729             wq.push(n->in(j));
 730           }
 731         } else {
 732           wq.push(n->in(0));
 733         }
 734       }
 735       for (uint i = 0; i < wq.size(); i++) {
 736         Node *n = wq.at(i);
 737         assert(n->is_CFG(), "");
 738         if (n->is_Multi()) {
 739           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 740             Node* u = n->fast_out(j);
 741             if (u->is_CFG()) {
 742               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 743                 return NodeSentinel;
 744               }
 745             }
 746           }
 747         }
 748       }
    } else if (c->is_Proj()) {
 750       if (c->is_IfProj()) {
 751         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 752           // continue;
 753         } else {
 754           if (!allow_one_proj) {
 755             return NodeSentinel;
 756           }
 757           if (iffproj == NULL) {
 758             iffproj = c;
 759           } else {
 760             return NodeSentinel;
 761           }
 762         }
 763       } else if (c->Opcode() == Op_JumpProj) {
 764         return NodeSentinel; // unsupported
 765       } else if (c->Opcode() == Op_CatchProj) {
 766         return NodeSentinel; // unsupported
 767       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 768         return NodeSentinel; // unsupported
 769       } else {
 770         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 771       }
 772     }
 773     c = next;
 774   }
 775   return iffproj;
 776 }
 777 
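// Starting from mem, walk the memory graph up until a memory state whose
// control strictly dominates ctrl is found and return it (NULL if a cycle is
// hit); mem_ctrl is set to the control of the returned state.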
 778 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 779   ResourceMark rm;
 780   VectorSet wq(Thread::current()->resource_area());
 781   wq.set(mem->_idx);
 782   mem_ctrl = phase->ctrl_or_self(mem);
 783   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 784     mem = next_mem(mem, alias);
 785     if (wq.test_set(mem->_idx)) {
 786       return NULL;
 787     }
 788     mem_ctrl = phase->ctrl_or_self(mem);
 789   }
 790   if (mem->is_MergeMem()) {
 791     mem = mem->as_MergeMem()->memory_at(alias);
 792     mem_ctrl = phase->ctrl_or_self(mem);
 793   }
 794   return mem;
 795 }
 796 
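// Find the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking up
// the dominator tree: a memory Phi on a region, the memory projection of a
// call, or the memory projection of a safepoint, membar or start node.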
 797 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 798   Node* mem = NULL;
 799   Node* c = ctrl;
 800   do {
 801     if (c->is_Region()) {
 802       Node* phi_bottom = NULL;
 803       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 804         Node* u = c->fast_out(i);
 805         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 806           if (u->adr_type() == TypePtr::BOTTOM) {
 807             mem = u;
 808           }
 809         }
 810       }
 811     } else {
 812       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 813         CallProjections projs;
 814         c->as_Call()->extract_projections(&projs, true, false);
 815         if (projs.fallthrough_memproj != NULL) {
 816           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 817             if (projs.catchall_memproj == NULL) {
 818               mem = projs.fallthrough_memproj;
 819             } else {
 820               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 821                 mem = projs.fallthrough_memproj;
 822               } else {
 823                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 824                 mem = projs.catchall_memproj;
 825               }
 826             }
 827           }
 828         } else {
 829           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 830           if (proj != NULL &&
 831               proj->adr_type() == TypePtr::BOTTOM) {
 832             mem = proj;
 833           }
 834         }
 835       } else {
 836         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 837           Node* u = c->fast_out(i);
 838           if (u->is_Proj() &&
 839               u->bottom_type() == Type::MEMORY &&
 840               u->adr_type() == TypePtr::BOTTOM) {
 841               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 842               assert(mem == NULL, "only one proj");
 843               mem = u;
 844           }
 845         }
 846         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 847       }
 848     }
 849     c = phase->idom(c);
 850   } while (mem == NULL);
 851   return mem;
 852 }
 853 
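// Collect the data uses of n that are at control ctrl (skipping loop phis
// whose back-edge input is n) so the caller can reprocess them.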
 854 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 855   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 856     Node* u = n->fast_out(i);
 857     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 858       uses.push(u);
 859     }
 860   }
 861 }
 862 
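// Turn a strip-mined loop nest back into a regular loop nest: replace the
// OuterStripMinedLoop head and its loop end with a plain LoopNode and IfNode,
// and clear the inner CountedLoop's strip-mined flag.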
 863 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 864   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 865   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 866   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 867   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 868   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 869   phase->lazy_replace(outer, new_outer);
 870   phase->lazy_replace(le, new_le);
 871   inner->clear_strip_mined();
 872 }
 873 
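// Emit the "is the heap stable?" test at ctrl. A sketch of the emitted shape:
//   gc_state = LoadB(ThreadLocal + gc_state_offset)
//   if ((gc_state & HAS_FORWARDED) != 0) -> ctrl (barrier needed)
//   else                                 -> heap_stable_ctrl (fast path)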
 874 void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
 875                                                   PhaseIdealLoop* phase) {
 876   IdealLoopTree* loop = phase->get_loop(ctrl);
 877   Node* thread = new ThreadLocalNode();
 878   phase->register_new_node(thread, ctrl);
 879   Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 880   phase->set_ctrl(offset, phase->C->root());
 881   Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
 882   phase->register_new_node(gc_state_addr, ctrl);
 883   uint gc_state_idx = Compile::AliasIdxRaw;
 884   const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
 885   debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
 886 
 887   Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
 888   phase->register_new_node(gc_state, ctrl);
 889   Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
 890   phase->register_new_node(heap_stable_and, ctrl);
 891   Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
 892   phase->register_new_node(heap_stable_cmp, ctrl);
 893   Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
 894   phase->register_new_node(heap_stable_test, ctrl);
 895   IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 896   phase->register_control(heap_stable_iff, loop, ctrl);
 897 
 898   heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
 899   phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
 900   ctrl = new IfTrueNode(heap_stable_iff);
 901   phase->register_control(ctrl, loop, heap_stable_iff);
 902 
 903   assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
 904 }
 905 
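// If val may be null, emit an explicit null check at ctrl: ctrl becomes the
// non-null path and null_ctrl the null path. If val is known to be non-null,
// nothing is emitted and null_ctrl is left untouched.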
 906 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 907   const Type* val_t = phase->igvn().type(val);
 908   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 909     IdealLoopTree* loop = phase->get_loop(ctrl);
 910     Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
 911     phase->register_new_node(null_cmp, ctrl);
 912     Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
 913     phase->register_new_node(null_test, ctrl);
 914     IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 915     phase->register_control(null_iff, loop, ctrl);
 916     ctrl = new IfTrueNode(null_iff);
 917     phase->register_control(ctrl, loop, null_iff);
 918     null_ctrl = new IfFalseNode(null_iff);
 919     phase->register_control(null_ctrl, loop, null_iff);
 920   }
 921 }
 922 
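// Clone the null-check If that guards the uncommon trap at unc_ctrl so the
// check is also performed at c: c becomes the non-null projection of the
// cloned If, and a new CastPP of the uncasted value, pinned at that
// projection, is returned.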
 923 Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
 924   IdealLoopTree *loop = phase->get_loop(c);
 925   Node* iff = unc_ctrl->in(0);
 926   assert(iff->is_If(), "broken");
 927   Node* new_iff = iff->clone();
 928   new_iff->set_req(0, c);
 929   phase->register_control(new_iff, loop, c);
 930   Node* iffalse = new IfFalseNode(new_iff->as_If());
 931   phase->register_control(iffalse, loop, new_iff);
 932   Node* iftrue = new IfTrueNode(new_iff->as_If());
 933   phase->register_control(iftrue, loop, new_iff);
 934   c = iftrue;
 935   const Type *t = phase->igvn().type(val);
 936   assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
 937   Node* uncasted_val = val->in(1);
 938   val = new CastPPNode(uncasted_val, t);
 939   val->init_req(0, c);
 940   phase->register_new_node(val, c);
 941   return val;
 942 }
 943 
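// Divert the trap path of the null-check If above unc_ctrl: nodes that were
// control dependent on its trap projection are moved to new_unc_ctrl, and the
// uncommon trap call (or the region in front of it) is repointed from that
// projection to new_unc_ctrl.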
 944 void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
 945                                                 Unique_Node_List& uses, PhaseIdealLoop* phase) {
 946   IfNode* iff = unc_ctrl->in(0)->as_If();
 947   Node* proj = iff->proj_out(0);
 948   assert(proj != unc_ctrl, "bad projection");
 949   Node* use = proj->unique_ctrl_out();
 950 
 951   assert(use == unc || use->is_Region(), "what else?");
 952 
 953   uses.clear();
 954   if (use == unc) {
 955     phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
 956     for (uint i = 1; i < unc->req(); i++) {
 957       Node* n = unc->in(i);
 958       if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
 959         uses.push(n);
 960       }
 961     }
 962   } else {
 963     assert(use->is_Region(), "what else?");
 964     uint idx = 1;
 965     for (; use->in(idx) != proj; idx++);
 966     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
 967       Node* u = use->fast_out(i);
 968       if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
 969         uses.push(u->in(idx));
 970       }
 971     }
 972   }
  for (uint next = 0; next < uses.size(); next++) {
 974     Node *n = uses.at(next);
 975     assert(phase->get_ctrl(n) == proj, "bad control");
 976     phase->set_ctrl_and_loop(n, new_unc_ctrl);
 977     if (n->in(0) == proj) {
 978       phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
 979     }
 980     for (uint i = 0; i < n->req(); i++) {
 981       Node* m = n->in(i);
 982       if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
 983         uses.push(m);
 984       }
 985     }
 986   }
 987 
 988   phase->igvn().rehash_node_delayed(use);
 989   int nb = use->replace_edge(proj, new_unc_ctrl);
 990   assert(nb == 1, "only use expected");
 991 }
 992 
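// Emit the collection-set membership test at ctrl. A sketch of the shape:
//   idx  = CastP2X(val) >> region_size_bytes_shift
//   byte = LoadB(in_cset_fast_test_addr + idx)
//   if (byte == 0) -> not_cset_ctrl (object is not in the collection set)
//   else           -> ctrl (continue to the slow path)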
 993 void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 994   IdealLoopTree *loop = phase->get_loop(ctrl);
 995   Node* raw_rbtrue = new CastP2XNode(ctrl, val);
 996   phase->register_new_node(raw_rbtrue, ctrl);
 997   Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 998   phase->register_new_node(cset_offset, ctrl);
 999   Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
1000   phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
1001   Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
1002   phase->register_new_node(in_cset_fast_test_adr, ctrl);
1003   uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
1004   const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
1005   debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
1006   Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
1007   phase->register_new_node(in_cset_fast_test_load, ctrl);
1008   Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
1009   phase->register_new_node(in_cset_fast_test_cmp, ctrl);
1010   Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
1011   phase->register_new_node(in_cset_fast_test_test, ctrl);
1012   IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
1013   phase->register_control(in_cset_fast_test_iff, loop, ctrl);
1014 
1015   not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
1016   phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
1017 
1018   ctrl = new IfFalseNode(in_cset_fast_test_iff);
1019   phase->register_control(ctrl, loop, in_cset_fast_test_iff);
1020 }
1021 
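// Emit the slow-path call to ShenandoahRuntime::load_reference_barrier at
// ctrl, passing the barrier's input oop as the only argument. ctrl, val and
// result_mem are updated to the call's control projection, cast result and
// raw memory projection respectively.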
1022 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
1024   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();
1025 
1026   // The slow path stub consumes and produces raw memory in addition
1027   // to the existing memory edges
1028   Node* base = find_bottom_mem(ctrl, phase);
1029   MergeMemNode* mm = MergeMemNode::make(base);
1030   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1031   phase->register_new_node(mm, ctrl);
1032 
1033   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(),
1034                                 CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier),
1035                                 "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
1036   call->init_req(TypeFunc::Control, ctrl);
1037   call->init_req(TypeFunc::I_O, phase->C->top());
1038   call->init_req(TypeFunc::Memory, mm);
1039   call->init_req(TypeFunc::FramePtr, phase->C->top());
1040   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1041   call->init_req(TypeFunc::Parms, val);
1042   phase->register_control(call, loop, ctrl);
1043   ctrl = new ProjNode(call, TypeFunc::Control);
1044   phase->register_control(ctrl, loop, call);
1045   result_mem = new ProjNode(call, TypeFunc::Memory);
1046   phase->register_new_node(result_mem, call);
1047   val = new ProjNode(call, TypeFunc::Parms);
1048   phase->register_new_node(val, call);
1049   val = new CheckCastPPNode(ctrl, val, obj_type);
1050   phase->register_new_node(val, ctrl);
1051 }
1052 
1053 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1054   Node* ctrl = phase->get_ctrl(barrier);
1055   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1056 
1057   // Update the control of all nodes that should be after the
1058   // barrier control flow
1059   uses.clear();
1060   // Every node that is control dependent on the barrier's input
1061   // control will be after the expanded barrier. The raw memory (if
1062   // its memory is control dependent on the barrier's input control)
1063   // must stay above the barrier.
1064   uses_to_ignore.clear();
1065   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1066     uses_to_ignore.push(init_raw_mem);
1067   }
1068   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1069     Node *n = uses_to_ignore.at(next);
1070     for (uint i = 0; i < n->req(); i++) {
1071       Node* in = n->in(i);
1072       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1073         uses_to_ignore.push(in);
1074       }
1075     }
1076   }
1077   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1078     Node* u = ctrl->fast_out(i);
1079     if (u->_idx < last &&
1080         u != barrier &&
1081         !uses_to_ignore.member(u) &&
1082         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1083         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1084       Node* old_c = phase->ctrl_or_self(u);
1085       Node* c = old_c;
1086       if (c != ctrl ||
1087           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1088           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1089         phase->igvn().rehash_node_delayed(u);
1090         int nb = u->replace_edge(ctrl, region);
1091         if (u->is_CFG()) {
1092           if (phase->idom(u) == ctrl) {
1093             phase->set_idom(u, region, phase->dom_depth(region));
1094           }
1095         } else if (phase->get_ctrl(u) == ctrl) {
1096           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1097           uses.push(u);
1098         }
1099         assert(nb == 1, "more than 1 ctrl input?");
1100         --i, imax -= nb;
1101       }
1102     }
1103   }
1104 }
1105 
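// A load-reference barrier that hangs off a Java call has been duplicated on
// the catch-all path. Build Phi nodes at the regions that merge the two
// exception paths, selecting n on inputs dominated by the fall-through catch
// projection and n_clone on inputs dominated by the catch-all projection.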
1106 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1107   Node* region = NULL;
1108   while (c != ctrl) {
1109     if (c->is_Region()) {
1110       region = c;
1111     }
1112     c = phase->idom(c);
1113   }
1114   assert(region != NULL, "");
1115   Node* phi = new PhiNode(region, n->bottom_type());
1116   for (uint j = 1; j < region->req(); j++) {
1117     Node* in = region->in(j);
1118     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1119       phi->init_req(j, n);
1120     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1121       phi->init_req(j, n_clone);
1122     } else {
1123       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1124     }
1125   }
1126   phase->register_new_node(phi, region);
1127   return phi;
1128 }
1129 
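// Pin each barrier at its expansion point and expand it into explicit IR.
// Strip-mined loop nests that expansion would break are un-strip-mined first;
// load-reference barriers are then commoned with a following null-check
// pattern where possible, and duplicated along both exception paths when
// their control is a Java call, before the actual expansion.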
1130 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1131   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1132 
1133   Unique_Node_List uses;
1134   for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1135     Node* barrier = state->enqueue_barrier(i);
1136     Node* ctrl = phase->get_ctrl(barrier);
1137     IdealLoopTree* loop = phase->get_loop(ctrl);
1138     if (loop->_head->is_OuterStripMinedLoop()) {
1139       // Expanding a barrier here will break loop strip mining
1140       // verification. Transform the loop so the loop nest doesn't
1141       // appear as strip mined.
1142       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1143       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1144     }
1145   }
1146 
1147   Node_Stack stack(0);
1148   Node_List clones;
1149   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1150     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1151     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1152       continue;
1153     }
1154 
1155     Node* ctrl = phase->get_ctrl(lrb);
1156     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1157 
1158     CallStaticJavaNode* unc = NULL;
1159     Node* unc_ctrl = NULL;
1160     Node* uncasted_val = val;
1161 
1162     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1163       Node* u = lrb->fast_out(i);
1164       if (u->Opcode() == Op_CastPP &&
1165           u->in(0) != NULL &&
1166           phase->is_dominator(u->in(0), ctrl)) {
1167         const Type* u_t = phase->igvn().type(u);
1168 
1169         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1170             u->in(0)->Opcode() == Op_IfTrue &&
1171             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1172             u->in(0)->in(0)->is_If() &&
1173             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1174             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1175             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1176             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1177             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1178           IdealLoopTree* loop = phase->get_loop(ctrl);
1179           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1180 
1181           if (!unc_loop->is_member(loop)) {
1182             continue;
1183           }
1184 
1185           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1186           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1187           if (branch == NodeSentinel) {
1188             continue;
1189           }
1190 
1191           phase->igvn().replace_input_of(u, 1, val);
1192           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1193           phase->set_ctrl(u, u->in(0));
1194           phase->set_ctrl(lrb, u->in(0));
1195           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1196           unc_ctrl = u->in(0);
1197           val = u;
1198 
1199           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1200             Node* u = val->fast_out(j);
1201             if (u == lrb) continue;
1202             phase->igvn().rehash_node_delayed(u);
1203             int nb = u->replace_edge(val, lrb);
1204             --j; jmax -= nb;
1205           }
1206 
1207           RegionNode* r = new RegionNode(3);
1208           IfNode* iff = unc_ctrl->in(0)->as_If();
1209 
1210           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1211           Node* unc_ctrl_clone = unc_ctrl->clone();
1212           phase->register_control(unc_ctrl_clone, loop, iff);
1213           Node* c = unc_ctrl_clone;
1214           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1215           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1216 
1217           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1218           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1219           phase->lazy_replace(c, unc_ctrl);
          c = NULL;
1221           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1222           phase->set_ctrl(val, unc_ctrl_clone);
1223 
1224           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1225           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1226           Node* iff_proj = iff->proj_out(0);
1227           r->init_req(2, iff_proj);
1228           phase->register_control(r, phase->ltree_root(), iff);
1229 
1230           Node* new_bol = new_iff->in(1)->clone();
1231           Node* new_cmp = new_bol->in(1)->clone();
1232           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1233           assert(new_cmp->in(1) == val->in(1), "broken");
1234           new_bol->set_req(1, new_cmp);
1235           new_cmp->set_req(1, lrb);
1236           phase->register_new_node(new_bol, new_iff->in(0));
1237           phase->register_new_node(new_cmp, new_iff->in(0));
1238           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1239           phase->igvn().replace_input_of(new_cast, 1, lrb);
1240 
1241           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1242             Node* u = lrb->fast_out(i);
1243             if (u == new_cast || u == new_cmp) {
1244               continue;
1245             }
1246             phase->igvn().rehash_node_delayed(u);
1247             int nb = u->replace_edge(lrb, new_cast);
1248             assert(nb > 0, "no update?");
1249             --i; imax -= nb;
1250           }
1251 
1252           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1253             Node* u = val->fast_out(i);
1254             if (u == lrb) {
1255               continue;
1256             }
1257             phase->igvn().rehash_node_delayed(u);
1258             int nb = u->replace_edge(val, new_cast);
1259             assert(nb > 0, "no update?");
1260             --i; imax -= nb;
1261           }
1262 
1263           ctrl = unc_ctrl_clone;
1264           phase->set_ctrl_and_loop(lrb, ctrl);
1265           break;
1266         }
1267       }
1268     }
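         // When the barrier is pinned at a Java call, both the fall-through and the exception path
         // need the barrier result: clone the barrier and the data nodes hanging off it for the
         // exception handler side, and rewire phis and uses that are only reachable through one of
         // the two paths.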
1269     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1270       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1271       CallProjections projs;
1272       call->extract_projections(&projs, false, false);
1273 
1274       Node* lrb_clone = lrb->clone();
1275       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1276       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1277 
1278       stack.push(lrb, 0);
1279       clones.push(lrb_clone);
1280 
1281       do {
1282         assert(stack.size() == clones.size(), "");
1283         Node* n = stack.node();
1284 #ifdef ASSERT
1285         if (n->is_Load()) {
1286           Node* mem = n->in(MemNode::Memory);
1287           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1288             Node* u = mem->fast_out(j);
1289             assert((!u->is_Store() && !u->is_LoadStore()) || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1290           }
1291         }
1292 #endif
1293         uint idx = stack.index();
1294         Node* n_clone = clones.at(clones.size()-1);
1295         if (idx < n->outcnt()) {
1296           Node* u = n->raw_out(idx);
1297           Node* c = phase->ctrl_or_self(u);
1298           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1299             stack.set_index(idx+1);
1300             assert(!u->is_CFG(), "");
1301             stack.push(u, 0);
1302             Node* u_clone = u->clone();
1303             int nb = u_clone->replace_edge(n, n_clone);
1304             assert(nb > 0, "should have replaced some uses");
1305             phase->register_new_node(u_clone, projs.catchall_catchproj);
1306             clones.push(u_clone);
1307             phase->set_ctrl(u, projs.fallthrough_catchproj);
1308           } else {
1309             bool replaced = false;
1310             if (u->is_Phi()) {
1311               for (uint k = 1; k < u->req(); k++) {
1312                 if (u->in(k) == n) {
1313                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1314                     phase->igvn().replace_input_of(u, k, n_clone);
1315                     replaced = true;
1316                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1317                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1318                     replaced = true;
1319                   }
1320                 }
1321               }
1322             } else {
1323               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1324                 phase->igvn().rehash_node_delayed(u);
1325                 int nb = u->replace_edge(n, n_clone);
1326                 assert(nb > 0, "should have replaced some uses");
1327                 replaced = true;
1328               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1329                 phase->igvn().rehash_node_delayed(u);
1330                 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1331                 assert(nb > 0, "should have replaced some uses");
1332                 replaced = true;
1333               }
1334             }
1335             if (!replaced) {
1336               stack.set_index(idx+1);
1337             }
1338           }
1339         } else {
1340           stack.pop();
1341           clones.pop();
1342         }
1343       } while (stack.size() > 0);
1344       assert(stack.size() == 0 && clones.size() == 0, "");
1345     }
1346   }
1347 
1348   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1349     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1350     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1351       continue;
1352     }
1353     Node* ctrl = phase->get_ctrl(lrb);
1354     IdealLoopTree* loop = phase->get_loop(ctrl);
1355     if (loop->_head->is_OuterStripMinedLoop()) {
1356       // Expanding a barrier here will break loop strip mining
1357       // verification. Transform the loop so the loop nest doesn't
1358       // appear as strip mined.
1359       OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1360       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1361     }
1362   }
1363 
1364   // Expand load-reference-barriers
1365   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1366   Unique_Node_List uses_to_ignore;
1367   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1368     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1369     if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1370       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1371       continue;
1372     }
1373     uint last = phase->C->unique();
1374     Node* ctrl = phase->get_ctrl(lrb);
1375     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1376 
1377 
1378     Node* orig_ctrl = ctrl;
1379 
1380     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1381     Node* init_raw_mem = raw_mem;
1382     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1383 
1384     IdealLoopTree *loop = phase->get_loop(ctrl);
1385     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1386     Node* unc_ctrl = NULL;
1387     if (unc != NULL) {
1388       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1389         unc = NULL;
1390       } else {
1391         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1392       }
1393     }
1394 
1395     Node* uncasted_val = val;
1396     if (unc != NULL) {
1397       uncasted_val = val->in(1);
1398     }
1399 
1400     Node* heap_stable_ctrl = NULL;
1401     Node* null_ctrl = NULL;
1402 
1403     assert(val->bottom_type()->make_oopptr(), "need oop");
1404     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1405 
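         // The expanded barrier is a five-way diamond merging the heap-stable, not-in-cset,
         // already-forwarded, evacuation (runtime call) and null paths, with phis for the
         // resolved value and the raw memory state.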
1406     enum { _heap_stable = 1, _not_cset, _fwded, _evac_path, _null_path, PATH_LIMIT };
1407     Node* region = new RegionNode(PATH_LIMIT);
1408     Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1409     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1410 
1411     // Stable path.
1412     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1413     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1414 
1415     // Heap stable case
1416     region->init_req(_heap_stable, heap_stable_ctrl);
1417     val_phi->init_req(_heap_stable, uncasted_val);
1418     raw_mem_phi->init_req(_heap_stable, raw_mem);
1419 
1420     Node* reg2_ctrl = NULL;
1421     // Null case
1422     test_null(ctrl, val, null_ctrl, phase);
1423     if (null_ctrl != NULL) {
1424       reg2_ctrl = null_ctrl->in(0);
1425       region->init_req(_null_path, null_ctrl);
1426       val_phi->init_req(_null_path, uncasted_val);
1427       raw_mem_phi->init_req(_null_path, raw_mem);
1428     } else {
1429       region->del_req(_null_path);
1430       val_phi->del_req(_null_path);
1431       raw_mem_phi->del_req(_null_path);
1432     }
1433 
1434     // Test for in-cset.
1435     // Wires !in_cset(obj) to slot 2 of region and phis
1436     Node* not_cset_ctrl = NULL;
1437     in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1438     if (not_cset_ctrl != NULL) {
1439       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1440       region->init_req(_not_cset, not_cset_ctrl);
1441       val_phi->init_req(_not_cset, uncasted_val);
1442       raw_mem_phi->init_req(_not_cset, raw_mem);
1443     }
1444 
1445     // Resolve the object when the original value is in the collection set:
1446     // read the mark word and check whether the object has already been forwarded.
1447     Node* new_val = uncasted_val;
1448     if (unc_ctrl != NULL) {
1449       // Clone the null check in this branch to allow implicit null check
1450       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1451       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1452 
1453       IfNode* iff = unc_ctrl->in(0)->as_If();
1454       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1455     }
1456     Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(oopDesc::mark_offset_in_bytes()));
1457     phase->register_new_node(addr, ctrl);
1458     assert(new_val->bottom_type()->isa_oopptr(), "what else?");
1459     Node* markword = new LoadXNode(ctrl, raw_mem, addr, TypeRawPtr::BOTTOM, TypeX_X, MemNode::unordered);
1460     phase->register_new_node(markword, ctrl);
1461 
1462     // Test if object is forwarded. This is the case if lowest two bits are set.
1463     Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
1464     phase->register_new_node(masked, ctrl);
1465     Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
1466     phase->register_new_node(cmp, ctrl);
1467 
1468     // Only branch to the LRB stub if the object is not forwarded; otherwise return the forwarding pointer
1469     Node* bol = new BoolNode(cmp, BoolTest::eq); // Low bits equal to marked_value (3) mean the object is forwarded
1470     phase->register_new_node(bol, ctrl);
1471 
1472     IfNode* iff = new IfNode(ctrl, bol, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1473     phase->register_control(iff, loop, ctrl);
1474     Node* if_fwd = new IfTrueNode(iff);
1475     phase->register_control(if_fwd, loop, iff);
1476     Node* if_not_fwd = new IfFalseNode(iff);
1477     phase->register_control(if_not_fwd, loop, iff);
1478 
1479     // Decode forward pointer: since we already have the lowest bits, we can just subtract them
1480     // from the mark word without the need for large immediate mask.
1481     Node* masked2 = new SubXNode(markword, masked);
1482     phase->register_new_node(masked2, if_fwd);
1483     Node* fwdraw = new CastX2PNode(masked2);
1484     fwdraw->init_req(0, if_fwd);
1485     phase->register_new_node(fwdraw, if_fwd);
1486     Node* fwd = new CheckCastPPNode(NULL, fwdraw, val->bottom_type());
1487     phase->register_new_node(fwd, if_fwd);
1488 
1489     // Wire up the forwarded-object path in slot 3.
1490     region->init_req(_fwded, if_fwd);
1491     val_phi->init_req(_fwded, fwd);
1492     raw_mem_phi->init_req(_fwded, raw_mem);
1493 
1494     // Call the LRB stub and wire up that path in slot 4
1495     Node* result_mem = NULL;
1496     ctrl = if_not_fwd;
1497     fwd = new_val;
1498     call_lrb_stub(ctrl, fwd, result_mem, raw_mem, phase);
1499     region->init_req(_evac_path, ctrl);
1500     val_phi->init_req(_evac_path, fwd);
1501     raw_mem_phi->init_req(_evac_path, result_mem);
1502 
1503     phase->register_control(region, loop, heap_stable_iff);
1504     Node* out_val = val_phi;
1505     phase->register_new_node(val_phi, region);
1506     phase->register_new_node(raw_mem_phi, region);
1507 
1508     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1509 
1510     ctrl = orig_ctrl;
1511 
1512     if (unc != NULL) {
1513       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1514         Node* u = val->fast_out(i);
1515         Node* c = phase->ctrl_or_self(u);
1516         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1517           phase->igvn().rehash_node_delayed(u);
1518           int nb = u->replace_edge(val, out_val);
1519           --i; imax -= nb;
1520         }
1521       }
1522       if (val->outcnt() == 0) {
1523         phase->igvn()._worklist.push(val);
1524       }
1525     }
1526     phase->igvn().replace_node(lrb, out_val);
1527 
1528     follow_barrier_uses(out_val, ctrl, uses, phase);
1529 
1530     for(uint next = 0; next < uses.size(); next++ ) {
1531       Node *n = uses.at(next);
1532       assert(phase->get_ctrl(n) == ctrl, "bad control");
1533       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1534       phase->set_ctrl(n, region);
1535       follow_barrier_uses(n, ctrl, uses, phase);
1536     }
1537 
1538     // The slow path call produces memory: hook the raw memory phi
1539     // from the expanded load reference barrier with the rest of the graph
1540     // which may require adding memory phis at every post dominated
1541     // region and at enclosing loop heads. Use the memory state
1542     // collected in memory_nodes to fix the memory graph. Update that
1543     // memory state as we go.
1544     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1545   }
1546   // Done expanding load-reference-barriers.
1547   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1548 
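       // Expand SATB enqueue (pre-write) barriers: skip the work entirely when the heap is stable
       // or the pre-value is null, otherwise record the pre-value in the thread-local SATB queue
       // and fall back to the write_ref_field_pre_entry runtime call when the queue is full.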
1549   for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1550     Node* barrier = state->enqueue_barrier(i);
1551     Node* pre_val = barrier->in(1);
1552 
1553     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1554       ShouldNotReachHere();
1555       continue;
1556     }
1557 
1558     Node* ctrl = phase->get_ctrl(barrier);
1559 
1560     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1561       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1562       ctrl = ctrl->in(0)->in(0);
1563       phase->set_ctrl(barrier, ctrl);
1564     } else if (ctrl->is_CallRuntime()) {
1565       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1566       ctrl = ctrl->in(0);
1567       phase->set_ctrl(barrier, ctrl);
1568     }
1569 
1570     Node* init_ctrl = ctrl;
1571     IdealLoopTree* loop = phase->get_loop(ctrl);
1572     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1573     Node* init_raw_mem = raw_mem;
1574     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1575     Node* heap_stable_ctrl = NULL;
1576     Node* null_ctrl = NULL;
1577     uint last = phase->C->unique();
1578 
1579     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1580     Node* region = new RegionNode(PATH_LIMIT);
1581     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1582 
1583     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1584     Node* region2 = new RegionNode(PATH_LIMIT2);
1585     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1586 
1587     // Stable path.
1588     test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1589     region->init_req(_heap_stable, heap_stable_ctrl);
1590     phi->init_req(_heap_stable, raw_mem);
1591 
1592     // Null path
1593     Node* reg2_ctrl = NULL;
1594     test_null(ctrl, pre_val, null_ctrl, phase);
1595     if (null_ctrl != NULL) {
1596       reg2_ctrl = null_ctrl->in(0);
1597       region2->init_req(_null_path, null_ctrl);
1598       phi2->init_req(_null_path, raw_mem);
1599     } else {
1600       region2->del_req(_null_path);
1601       phi2->del_req(_null_path);
1602     }
1603 
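         // Fast path: store pre_val into the thread-local SATB queue buffer. The queue index counts
         // down in bytes, so a zero index means the buffer is full and the slow path is taken.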
1604     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1605     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1606     Node* thread = new ThreadLocalNode();
1607     phase->register_new_node(thread, ctrl);
1608     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1609     phase->register_new_node(buffer_adr, ctrl);
1610     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1611     phase->register_new_node(index_adr, ctrl);
1612 
1613     BasicType index_bt = TypeX_X->basic_type();
1614     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
1615     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1616     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1617     phase->register_new_node(index, ctrl);
1618     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1619     phase->register_new_node(index_cmp, ctrl);
1620     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1621     phase->register_new_node(index_test, ctrl);
1622     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1623     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1624     phase->register_control(queue_full_iff, loop, ctrl);
1625     Node* not_full = new IfTrueNode(queue_full_iff);
1626     phase->register_control(not_full, loop, queue_full_iff);
1627     Node* full = new IfFalseNode(queue_full_iff);
1628     phase->register_control(full, loop, queue_full_iff);
1629 
1630     ctrl = not_full;
1631 
1632     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1633     phase->register_new_node(next_index, ctrl);
1634 
1635     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1636     phase->register_new_node(buffer, ctrl);
1637     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1638     phase->register_new_node(log_addr, ctrl);
1639     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1640     phase->register_new_node(log_store, ctrl);
1641     // update the index
1642     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1643     phase->register_new_node(index_update, ctrl);
1644 
1645     // Fast-path case
1646     region2->init_req(_fast_path, ctrl);
1647     phi2->init_req(_fast_path, index_update);
1648 
1649     ctrl = full;
1650 
1651     Node* base = find_bottom_mem(ctrl, phase);
1652 
1653     MergeMemNode* mm = MergeMemNode::make(base);
1654     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1655     phase->register_new_node(mm, ctrl);
1656 
1657     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1658     call->init_req(TypeFunc::Control, ctrl);
1659     call->init_req(TypeFunc::I_O, phase->C->top());
1660     call->init_req(TypeFunc::Memory, mm);
1661     call->init_req(TypeFunc::FramePtr, phase->C->top());
1662     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1663     call->init_req(TypeFunc::Parms, pre_val);
1664     call->init_req(TypeFunc::Parms+1, thread);
1665     phase->register_control(call, loop, ctrl);
1666 
1667     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1668     phase->register_control(ctrl_proj, loop, call);
1669     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1670     phase->register_new_node(mem_proj, call);
1671 
1672     // Slow-path case
1673     region2->init_req(_slow_path, ctrl_proj);
1674     phi2->init_req(_slow_path, mem_proj);
1675 
1676     phase->register_control(region2, loop, reg2_ctrl);
1677     phase->register_new_node(phi2, region2);
1678 
1679     region->init_req(_heap_unstable, region2);
1680     phi->init_req(_heap_unstable, phi2);
1681 
1682     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1683     phase->register_new_node(phi, region);
1684 
1685     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1686     for(uint next = 0; next < uses.size(); next++ ) {
1687       Node *n = uses.at(next);
1688       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1689       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1690       phase->set_ctrl(n, region);
1691       follow_barrier_uses(n, init_ctrl, uses, phase);
1692     }
1693     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1694 
1695     phase->igvn().replace_node(barrier, pre_val);
1696   }
1697   assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1698 
1699 }
1700 
1701 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1702   IdealLoopTree *loop = phase->get_loop(iff);
1703   Node* loop_head = loop->_head;
1704   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1705 
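       // The heap stable test has the shape If(Bool(CmpI(AndI(gc-state load, mask), 0))).
       // Pull the chain apart so it can be cloned above the loop entry when the load is
       // not already loop invariant.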
1706   Node* bol = iff->in(1);
1707   Node* cmp = bol->in(1);
1708   Node* andi = cmp->in(1);
1709   Node* load = andi->in(1);
1710 
1711   assert(is_gc_state_load(load), "broken");
1712   if (!phase->is_dominator(load->in(0), entry_c)) {
1713     Node* mem_ctrl = NULL;
1714     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1715     load = load->clone();
1716     load->set_req(MemNode::Memory, mem);
1717     load->set_req(0, entry_c);
1718     phase->register_new_node(load, entry_c);
1719     andi = andi->clone();
1720     andi->set_req(1, load);
1721     phase->register_new_node(andi, entry_c);
1722     cmp = cmp->clone();
1723     cmp->set_req(1, andi);
1724     phase->register_new_node(cmp, entry_c);
1725     bol = bol->clone();
1726     bol->set_req(1, cmp);
1727     phase->register_new_node(bol, entry_c);
1728 
1729     Node* old_bol = iff->in(1);
1730     phase->igvn().replace_input_of(iff, 1, bol);
1731   }
1732 }
1733 
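     // Returns true if n is a heap-stable test whose region is dominated by another heap-stable test
     // and every path into that region comes from one of the dominating test's projections, i.e. the
     // two tests are back to back and can be merged.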
1734 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1735   if (!n->is_If() || n->is_CountedLoopEnd()) {
1736     return false;
1737   }
1738   Node* region = n->in(0);
1739 
1740   if (!region->is_Region()) {
1741     return false;
1742   }
1743   Node* dom = phase->idom(region);
1744   if (!dom->is_If()) {
1745     return false;
1746   }
1747 
1748   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1749     return false;
1750   }
1751 
1752   IfNode* dom_if = dom->as_If();
1753   Node* proj_true = dom_if->proj_out(1);
1754   Node* proj_false = dom_if->proj_out(0);
1755 
1756   for (uint i = 1; i < region->req(); i++) {
1757     if (phase->is_dominator(proj_true, region->in(i))) {
1758       continue;
1759     }
1760     if (phase->is_dominator(proj_false, region->in(i))) {
1761       continue;
1762     }
1763     return false;
1764   }
1765 
1766   return true;
1767 }
1768 
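     // Merge a heap-stable test with the identical dominating test: common the gc-state loads, feed
     // the dominated If with a phi of constants that records which projection of the dominating If
     // each region path comes from, then split the If so it constant folds along every path.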
1769 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1770   assert(is_heap_stable_test(n), "no other tests");
1771   if (identical_backtoback_ifs(n, phase)) {
1772     Node* n_ctrl = n->in(0);
1773     if (phase->can_split_if(n_ctrl)) {
1774       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1775       if (is_heap_stable_test(n)) {
1776         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1777         assert(is_gc_state_load(gc_state_load), "broken");
1778         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1779         assert(is_gc_state_load(dom_gc_state_load), "broken");
1780         if (gc_state_load != dom_gc_state_load) {
1781           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1782         }
1783       }
1784       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1785       Node* proj_true = dom_if->proj_out(1);
1786       Node* proj_false = dom_if->proj_out(0);
1787       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1788       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1789 
1790       for (uint i = 1; i < n_ctrl->req(); i++) {
1791         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1792           bolphi->init_req(i, con_true);
1793         } else {
1794           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1795           bolphi->init_req(i, con_false);
1796         }
1797       }
1798       phase->register_new_node(bolphi, n_ctrl);
1799       phase->igvn().replace_input_of(n, 1, bolphi);
1800       phase->do_split_if(n);
1801     }
1802   }
1803 }
1804 
1805 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1806   // Find first invariant test that doesn't exit the loop
1807   LoopNode *head = loop->_head->as_Loop();
1808   IfNode* unswitch_iff = NULL;
1809   Node* n = head->in(LoopNode::LoopBackControl);
1810   int loop_has_sfpts = -1;
1811   while (n != head) {
1812     Node* n_dom = phase->idom(n);
1813     if (n->is_Region()) {
1814       if (n_dom->is_If()) {
1815         IfNode* iff = n_dom->as_If();
1816         if (iff->in(1)->is_Bool()) {
1817           BoolNode* bol = iff->in(1)->as_Bool();
1818           if (bol->in(1)->is_Cmp()) {
1819             // If condition is invariant and not a loop exit,
1820             // then found reason to unswitch.
1821             if (is_heap_stable_test(iff) &&
1822                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1823               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1824               if (loop_has_sfpts == -1) {
1825                 for(uint i = 0; i < loop->_body.size(); i++) {
1826                   Node *m = loop->_body[i];
1827                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1828                     loop_has_sfpts = 1;
1829                     break;
1830                   }
1831                 }
1832                 if (loop_has_sfpts == -1) {
1833                   loop_has_sfpts = 0;
1834                 }
1835               }
1836               if (!loop_has_sfpts) {
1837                 unswitch_iff = iff;
1838               }
1839             }
1840           }
1841         }
1842       }
1843     }
1844     n = n_dom;
1845   }
1846   return unswitch_iff;
1847 }
1848 
1849 
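     // Post-expansion cleanup: collect gc-state loads and heap-stable tests in one graph walk, common
     // the loads where possible, merge back-to-back tests and, if no other loop optimization is
     // pending, unswitch innermost loops on an invariant heap-stable test.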
1850 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1851   Node_List heap_stable_tests;
1852   Node_List gc_state_loads;
1853   stack.push(phase->C->start(), 0);
1854   do {
1855     Node* n = stack.node();
1856     uint i = stack.index();
1857 
1858     if (i < n->outcnt()) {
1859       Node* u = n->raw_out(i);
1860       stack.set_index(i+1);
1861       if (!visited.test_set(u->_idx)) {
1862         stack.push(u, 0);
1863       }
1864     } else {
1865       stack.pop();
1866       if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
1867         gc_state_loads.push(n);
1868       }
1869       if (n->is_If() && is_heap_stable_test(n)) {
1870         heap_stable_tests.push(n);
1871       }
1872     }
1873   } while (stack.size() > 0);
1874 
1875   bool progress;
1876   do {
1877     progress = false;
1878     for (uint i = 0; i < gc_state_loads.size(); i++) {
1879       Node* n = gc_state_loads.at(i);
1880       if (n->outcnt() != 0) {
1881         progress |= try_common_gc_state_load(n, phase);
1882       }
1883     }
1884   } while (progress);
1885 
1886   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1887     Node* n = heap_stable_tests.at(i);
1888     assert(is_heap_stable_test(n), "only evacuation test");
1889     merge_back_to_back_tests(n, phase);
1890   }
1891 
1892   if (!phase->C->major_progress()) {
1893     VectorSet seen(Thread::current()->resource_area());
1894     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1895       Node* n = heap_stable_tests.at(i);
1896       IdealLoopTree* loop = phase->get_loop(n);
1897       if (loop != phase->ltree_root() &&
1898           loop->_child == NULL &&
1899           !loop->_irreducible) {
1900         LoopNode* head = loop->_head->as_Loop();
1901         if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1902             !seen.test_set(head->_idx)) {
1903           IfNode* iff = find_unswitching_candidate(loop, phase);
1904           if (iff != NULL) {
1905             Node* bol = iff->in(1);
1906             if (head->is_strip_mined()) {
1907               head->verify_strip_mined(0);
1908             }
1909             move_heap_stable_test_out_of_loop(iff, phase);
1910             if (loop->policy_unswitching(phase)) {
1911               if (head->is_strip_mined()) {
1912                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1913                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1914               }
1915               phase->do_unswitching(loop, old_new);
1916             } else {
1917               // Not proceeding with unswitching. Move load back in
1918               // the loop.
1919               phase->igvn().replace_input_of(iff, 1, bol);
1920             }
1921           }
1922         }
1923       }
1924     }
1925   }
1926 }
1927 
1928 #ifdef ASSERT
1929 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
1930   const bool trace = false;
1931   ResourceMark rm;
1932   Unique_Node_List nodes;
1933   Unique_Node_List controls;
1934   Unique_Node_List memories;
1935 
1936   nodes.push(root);
1937   for (uint next = 0; next < nodes.size(); next++) {
1938     Node *n  = nodes.at(next);
1939     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
1940       controls.push(n);
1941       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
1942       for (uint next2 = 0; next2 < controls.size(); next2++) {
1943         Node *m = controls.at(next2);
1944         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1945           Node* u = m->fast_out(i);
1946           if (u->is_CFG() && !u->is_Root() &&
1947               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
1948               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
1949             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
1950             controls.push(u);
1951           }
1952         }
1953       }
1954       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
1955       for (uint next2 = 0; next2 < memories.size(); next2++) {
1956         Node *m = memories.at(next2);
1957         assert(m->bottom_type() == Type::MEMORY, "");
1958         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1959           Node* u = m->fast_out(i);
1960           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
1961             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1962             memories.push(u);
1963           } else if (u->is_LoadStore()) {
1964             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
1965             memories.push(u->find_out_with(Op_SCMemProj));
1966           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
1967             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1968             memories.push(u);
1969           } else if (u->is_Phi()) {
1970             assert(u->bottom_type() == Type::MEMORY, "");
1971             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
1972               assert(controls.member(u->in(0)), "");
1973               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1974               memories.push(u);
1975             }
1976           } else if (u->is_SafePoint() || u->is_MemBar()) {
1977             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
1978               Node* uu = u->fast_out(j);
1979               if (uu->bottom_type() == Type::MEMORY) {
1980                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
1981                 memories.push(uu);
1982               }
1983             }
1984           }
1985         }
1986       }
1987       for (uint next2 = 0; next2 < controls.size(); next2++) {
1988         Node *m = controls.at(next2);
1989         if (m->is_Region()) {
1990           bool all_in = true;
1991           for (uint i = 1; i < m->req(); i++) {
1992             if (!controls.member(m->in(i))) {
1993               all_in = false;
1994               break;
1995             }
1996           }
1997           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
1998           bool found_phi = false;
1999           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
2000             Node* u = m->fast_out(j);
2001             if (u->is_Phi() && memories.member(u)) {
2002               found_phi = true;
2003               for (uint i = 1; i < u->req() && found_phi; i++) {
2004                 Node* k = u->in(i);
2005                 if (memories.member(k) != controls.member(m->in(i))) {
2006                   found_phi = false;
2007                 }
2008               }
2009             }
2010           }
2011           assert(found_phi || all_in, "");
2012         }
2013       }
2014       controls.clear();
2015       memories.clear();
2016     }
2017     for( uint i = 0; i < n->len(); ++i ) {
2018       Node *m = n->in(i);
2019       if (m != NULL) {
2020         nodes.push(m);
2021       }
2022     }
2023   }
2024 }
2025 #endif
2026 
2027 ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
2028   ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
2029 }
2030 
2031 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2032   if (in(1) == NULL || in(1)->is_top()) {
2033     return Type::TOP;
2034   }
2035   const Type* t = in(1)->bottom_type();
2036   if (t == TypePtr::NULL_PTR) {
2037     return t;
2038   }
2039   return t->is_oopptr()->cast_to_nonconst();
2040 }
2041 
2042 const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2043   if (in(1) == NULL) {
2044     return Type::TOP;
2045   }
2046   const Type* t = phase->type(in(1));
2047   if (t == Type::TOP) {
2048     return Type::TOP;
2049   }
2050   if (t == TypePtr::NULL_PTR) {
2051     return t;
2052   }
2053   return t->is_oopptr()->cast_to_nonconst();
2054 }
2055 
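     // Classify whether a value flowing into an enqueue barrier needs one: nulls, constant oops,
     // fresh allocations and values already covered by another enqueue barrier do not; Phi and
     // CMove inputs have to be examined one by one.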
2056 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2057   if (n == NULL ||
2058       n->is_Allocate() ||
2059       n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2060       n->bottom_type() == TypePtr::NULL_PTR ||
2061       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2062     return NotNeeded;
2063   }
2064   if (n->is_Phi() ||
2065       n->is_CMove()) {
2066     return MaybeNeeded;
2067   }
2068   return Needed;
2069 }
2070 
2071 Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2072   for (;;) {
2073     if (n == NULL) {
2074       return n;
2075     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2076       return n;
2077     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2078       return n;
2079     } else if (n->is_ConstraintCast() ||
2080                n->Opcode() == Op_DecodeN ||
2081                n->Opcode() == Op_EncodeP) {
2082       n = n->in(1);
2083     } else if (n->is_Proj()) {
2084       n = n->in(0);
2085     } else {
2086       return n;
2087     }
2088   }
2089   ShouldNotReachHere();
2090   return NULL;
2091 }
2092 
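     // An enqueue barrier is redundant when, after skipping casts and projections, its input can be
     // shown to never need a barrier. For Phi/CMove inputs a worklist walk checks every reachable
     // input before the barrier is removed.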
2093 Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
2094   PhaseIterGVN* igvn = phase->is_IterGVN();
2095 
2096   Node* n = next(in(1));
2097 
2098   int cont = needed(n);
2099 
2100   if (cont == NotNeeded) {
2101     return in(1);
2102   } else if (cont == MaybeNeeded) {
2103     if (igvn == NULL) {
2104       phase->record_for_igvn(this);
2105       return this;
2106     } else {
2107       ResourceMark rm;
2108       Unique_Node_List wq;
2109       uint wq_i = 0;
2110 
2111       for (;;) {
2112         if (n->is_Phi()) {
2113           for (uint i = 1; i < n->req(); i++) {
2114             Node* m = n->in(i);
2115             if (m != NULL) {
2116               wq.push(m);
2117             }
2118           }
2119         } else {
2120           assert(n->is_CMove(), "nothing else here");
2121           Node* m = n->in(CMoveNode::IfFalse);
2122           wq.push(m);
2123           m = n->in(CMoveNode::IfTrue);
2124           wq.push(m);
2125         }
2126         Node* orig_n = NULL;
2127         do {
2128           if (wq_i >= wq.size()) {
2129             return in(1);
2130           }
2131           n = wq.at(wq_i);
2132           wq_i++;
2133           orig_n = n;
2134           n = next(n);
2135           cont = needed(n);
2136           if (cont == Needed) {
2137             return this;
2138           }
2139         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2140       }
2141     }
2142   }
2143 
2144   return this;
2145 }
2146 
2147 #ifdef ASSERT
2148 static bool has_never_branch(Node* root) {
2149   for (uint i = 1; i < root->req(); i++) {
2150     Node* in = root->in(i);
2151     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2152       return true;
2153     }
2154   }
2155   return false;
2156 }
2157 #endif
2158 
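     // Build a map from CFG node to the raw memory state live at that node: walk the memory graph
     // backwards from the root's exits, then iterate over the CFG in reverse post order, creating
     // or reusing memory phis at regions until the per-node states converge.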
2159 void MemoryGraphFixer::collect_memory_nodes() {
2160   Node_Stack stack(0);
2161   VectorSet visited(Thread::current()->resource_area());
2162   Node_List regions;
2163 
2164   // Walk the raw memory graph and create a mapping from CFG node to
2165   // memory node. Exclude phis for now.
2166   stack.push(_phase->C->root(), 1);
2167   do {
2168     Node* n = stack.node();
2169     int opc = n->Opcode();
2170     uint i = stack.index();
2171     if (i < n->req()) {
2172       Node* mem = NULL;
2173       if (opc == Op_Root) {
2174         Node* in = n->in(i);
2175         int in_opc = in->Opcode();
2176         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2177           mem = in->in(TypeFunc::Memory);
2178         } else if (in_opc == Op_Halt) {
2179           if (!in->in(0)->is_Region()) {
2180             Node* proj = in->in(0);
2181             assert(proj->is_Proj(), "");
2182             Node* in = proj->in(0);
2183             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2184             if (in->is_CallStaticJava()) {
2185               mem = in->in(TypeFunc::Memory);
2186             } else if (in->Opcode() == Op_Catch) {
2187               Node* call = in->in(0)->in(0);
2188               assert(call->is_Call(), "");
2189               mem = call->in(TypeFunc::Memory);
2190             } else if (in->Opcode() == Op_NeverBranch) {
2191               ResourceMark rm;
2192               Unique_Node_List wq;
2193               wq.push(in);
2194               wq.push(in->as_Multi()->proj_out(0));
2195               for (uint j = 1; j < wq.size(); j++) {
2196                 Node* c = wq.at(j);
2197                 assert(!c->is_Root(), "shouldn't leave loop");
2198                 if (c->is_SafePoint()) {
2199                   assert(mem == NULL, "only one safepoint");
2200                   mem = c->in(TypeFunc::Memory);
2201                 }
2202                 for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
2203                   Node* u = c->fast_out(k);
2204                   if (u->is_CFG()) {
2205                     wq.push(u);
2206                   }
2207                 }
2208               }
2209               assert(mem != NULL, "should have found safepoint");
2210             }
2211           }
2212         } else {
2213 #ifdef ASSERT
2214           n->dump();
2215           in->dump();
2216 #endif
2217           ShouldNotReachHere();
2218         }
2219       } else {
2220         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2221         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2222         mem = n->in(i);
2223       }
2224       i++;
2225       stack.set_index(i);
2226       if (mem == NULL) {
2227         continue;
2228       }
2229       for (;;) {
2230         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2231           break;
2232         }
2233         if (mem->is_Phi()) {
2234           stack.push(mem, 2);
2235           mem = mem->in(1);
2236         } else if (mem->is_Proj()) {
2237           stack.push(mem, mem->req());
2238           mem = mem->in(0);
2239         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2240           mem = mem->in(TypeFunc::Memory);
2241         } else if (mem->is_MergeMem()) {
2242           MergeMemNode* mm = mem->as_MergeMem();
2243           mem = mm->memory_at(_alias);
2244         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2245           assert(_alias == Compile::AliasIdxRaw, "");
2246           stack.push(mem, mem->req());
2247           mem = mem->in(MemNode::Memory);
2248         } else {
2249 #ifdef ASSERT
2250           mem->dump();
2251 #endif
2252           ShouldNotReachHere();
2253         }
2254       }
2255     } else {
2256       if (n->is_Phi()) {
2257         // Nothing
2258       } else if (!n->is_Root()) {
2259         Node* c = get_ctrl(n);
2260         _memory_nodes.map(c->_idx, n);
2261       }
2262       stack.pop();
2263     }
2264   } while(stack.is_nonempty());
2265 
2266   // Iterate over CFG nodes in rpo and propagate memory state to
2267   // compute memory state at regions, creating new phis if needed.
2268   Node_List rpo_list;
2269   visited.Clear();
2270   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2271   Node* root = rpo_list.pop();
2272   assert(root == _phase->C->root(), "");
2273 
2274   const bool trace = false;
2275 #ifdef ASSERT
2276   if (trace) {
2277     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2278       Node* c = rpo_list.at(i);
2279       if (_memory_nodes[c->_idx] != NULL) {
2280         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2281       }
2282     }
2283   }
2284 #endif
2285   uint last = _phase->C->unique();
2286 
2287 #ifdef ASSERT
2288   uint8_t max_depth = 0;
2289   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2290     IdealLoopTree* lpt = iter.current();
2291     max_depth = MAX2(max_depth, lpt->_nest);
2292   }
2293 #endif
2294 
2295   bool progress = true;
2296   int iteration = 0;
2297   Node_List dead_phis;
2298   while (progress) {
2299     progress = false;
2300     iteration++;
2301     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2302     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2303     IdealLoopTree* last_updated_ilt = NULL;
2304     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2305       Node* c = rpo_list.at(i);
2306 
2307       Node* prev_mem = _memory_nodes[c->_idx];
2308       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2309         Node* prev_region = regions[c->_idx];
2310         Node* unique = NULL;
2311         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2312           Node* m = _memory_nodes[c->in(j)->_idx];
2313           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2314           if (m != NULL) {
2315             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2316               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
2317               // continue
2318             } else if (unique == NULL) {
2319               unique = m;
2320             } else if (m == unique) {
2321               // continue
2322             } else {
2323               unique = NodeSentinel;
2324             }
2325           }
2326         }
2327         assert(unique != NULL, "empty phi???");
2328         if (unique != NodeSentinel) {
2329           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2330             dead_phis.push(prev_region);
2331           }
2332           regions.map(c->_idx, unique);
2333         } else {
2334           Node* phi = NULL;
2335           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2336             phi = prev_region;
2337             for (uint k = 1; k < c->req(); k++) {
2338               Node* m = _memory_nodes[c->in(k)->_idx];
2339               assert(m != NULL, "expect memory state");
2340               phi->set_req(k, m);
2341             }
2342           } else {
2343             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2344               Node* u = c->fast_out(j);
2345               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2346                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2347                 phi = u;
2348                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2349                   Node* m = _memory_nodes[c->in(k)->_idx];
2350                   assert(m != NULL, "expect memory state");
2351                   if (u->in(k) != m) {
2352                     phi = NULL;
2353                   }
2354                 }
2355               }
2356             }
2357             if (phi == NULL) {
2358               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2359               for (uint k = 1; k < c->req(); k++) {
2360                 Node* m = _memory_nodes[c->in(k)->_idx];
2361                 assert(m != NULL, "expect memory state");
2362                 phi->init_req(k, m);
2363               }
2364             }
2365           }
2366           assert(phi != NULL, "");
2367           regions.map(c->_idx, phi);
2368         }
2369         Node* current_region = regions[c->_idx];
2370         if (current_region != prev_region) {
2371           progress = true;
2372           if (prev_region == prev_mem) {
2373             _memory_nodes.map(c->_idx, current_region);
2374           }
2375         }
2376       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2377         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2378         assert(m != NULL, "expect memory state");
2379         if (m != prev_mem) {
2380           _memory_nodes.map(c->_idx, m);
2381           progress = true;
2382         }
2383       }
2384 #ifdef ASSERT
2385       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2386 #endif
2387     }
2388   }
2389 
2390   // Replace existing phi with computed memory state for that region
2391   // if different (could be a new phi or a dominating memory node if
2392   // that phi was found to be useless).
2393   while (dead_phis.size() > 0) {
2394     Node* n = dead_phis.pop();
2395     n->replace_by(_phase->C->top());
2396     n->destruct();
2397   }
2398   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2399     Node* c = rpo_list.at(i);
2400     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2401       Node* n = regions[c->_idx];
2402       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2403         _phase->register_new_node(n, c);
2404       }
2405     }
2406   }
2407   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2408     Node* c = rpo_list.at(i);
2409     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2410       Node* n = regions[c->_idx];
2411       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2412         Node* u = c->fast_out(i);
2413         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2414             u != n) {
2415           if (u->adr_type() == TypePtr::BOTTOM) {
2416             fix_memory_uses(u, n, n, c);
2417           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2418             _phase->lazy_replace(u, n);
2419             --i; --imax;
2420           }
2421         }
2422       }
2423     }
2424   }
2425 }
2426 
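     // Control for a memory node: call memory projections are mapped to the matching catch
     // projection (fall-through or exceptional) so that memory and control stay paired.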
2427 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2428   Node* c = _phase->get_ctrl(n);
2429   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2430     assert(c == n->in(0), "");
2431     CallNode* call = c->as_Call();
2432     CallProjections projs;
2433     call->extract_projections(&projs, true, false);
2434     if (projs.catchall_memproj != NULL) {
2435       if (projs.fallthrough_memproj == n) {
2436         c = projs.fallthrough_catchproj;
2437       } else {
2438         assert(projs.catchall_memproj == n, "");
2439         c = projs.catchall_catchproj;
2440       }
2441     }
2442   }
2443   return c;
2444 }
2445 
2446 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2447   if (_phase->has_ctrl(n))
2448     return get_ctrl(n);
2449   else {
2450     assert (n->is_CFG(), "must be a CFG node");
2451     return n;
2452   }
2453 }
2454 
2455 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2456   return m != NULL && get_ctrl(m) == c;
2457 }
2458 
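     // Find the memory state live at ctrl by walking up the dominator tree. When n is given, skip
     // same-control memory nodes that do not dominate n so the result can be used as n's memory input.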
2459 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2460   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2461   Node* mem = _memory_nodes[ctrl->_idx];
2462   Node* c = ctrl;
2463   while (!mem_is_valid(mem, c) &&
2464          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2465     c = _phase->idom(c);
2466     mem = _memory_nodes[c->_idx];
2467   }
2468   if (n != NULL && mem_is_valid(mem, c)) {
2469     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2470       mem = next_mem(mem, _alias);
2471     }
2472     if (mem->is_MergeMem()) {
2473       mem = mem->as_MergeMem()->memory_at(_alias);
2474     }
2475     if (!mem_is_valid(mem, c)) {
2476       do {
2477         c = _phase->idom(c);
2478         mem = _memory_nodes[c->_idx];
2479       } while (!mem_is_valid(mem, c) &&
2480                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2481     }
2482   }
2483   assert(mem->bottom_type() == Type::MEMORY, "");
2484   return mem;
2485 }
2486 
2487 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2488   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2489     Node* use = region->fast_out(i);
2490     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2491         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2492       return true;
2493     }
2494   }
2495   return false;
2496 }
2497 
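     // Splice the memory state new_mem produced at new_ctrl into the graph. If the memory at ctrl
     // already differs from mem, rewire the first memory node below it; otherwise propagate new_mem
     // down from new_ctrl, creating memory phis where states merge, then re-point raw memory users
     // that need the updated state.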
2498 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2499   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2500   const bool trace = false;
2501   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2502   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2503   GrowableArray<Node*> phis;
2504   if (mem_for_ctrl != mem) {
2505     Node* old = mem_for_ctrl;
2506     Node* prev = NULL;
2507     while (old != mem) {
2508       prev = old;
2509       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2510         assert(_alias == Compile::AliasIdxRaw, "");
2511         old = old->in(MemNode::Memory);
2512       } else if (old->Opcode() == Op_SCMemProj) {
2513         assert(_alias == Compile::AliasIdxRaw, "");
2514         old = old->in(0);
2515       } else {
2516         ShouldNotReachHere();
2517       }
2518     }
2519     assert(prev != NULL, "");
2520     if (new_ctrl != ctrl) {
2521       _memory_nodes.map(ctrl->_idx, mem);
2522       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2523     }
2524     uint input = (uint)MemNode::Memory;
2525     _phase->igvn().replace_input_of(prev, input, new_mem);
2526   } else {
2527     uses.clear();
2528     _memory_nodes.map(new_ctrl->_idx, new_mem);
2529     uses.push(new_ctrl);
2530     for(uint next = 0; next < uses.size(); next++ ) {
2531       Node *n = uses.at(next);
2532       assert(n->is_CFG(), "");
2533       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2534       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2535         Node* u = n->fast_out(i);
2536         if (!u->is_Root() && u->is_CFG() && u != n) {
2537           Node* m = _memory_nodes[u->_idx];
2538           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2539               !has_mem_phi(u) &&
2540               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2541             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2542             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2543 
2544             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2545               bool push = true;
2546               bool create_phi = true;
2547               if (_phase->is_dominator(new_ctrl, u)) {
2548                 create_phi = false;
2549               } else if (!_phase->C->has_irreducible_loop()) {
2550                 IdealLoopTree* loop = _phase->get_loop(ctrl);
2551                 bool do_check = true;
2552                 IdealLoopTree* l = loop;
2553                 create_phi = false;
2554                 while (l != _phase->ltree_root()) {
2555                   Node* head = l->_head;
2556                   if (head->in(0) == NULL) {
2557                     head = _phase->get_ctrl(head);
2558                   }
2559                   if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
2560                     create_phi = true;
2561                     do_check = false;
2562                     break;
2563                   }
2564                   l = l->_parent;
2565                 }
2566 
2567                 if (do_check) {
2568                   assert(!create_phi, "");
2569                   IdealLoopTree* u_loop = _phase->get_loop(u);
2570                   if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
2571                     Node* c = ctrl;
2572                     while (!_phase->is_dominator(c, u_loop->tail())) {
2573                       c = _phase->idom(c);
2574                     }
2575                     if (!_phase->is_dominator(c, u)) {
2576                       do_check = false;
2577                     }
2578                   }
2579                 }
2580 
2581                 if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
2582                   create_phi = true;
2583                 }
2584               }
2585               if (create_phi) {
2586                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2587                 _phase->register_new_node(phi, u);
2588                 phis.push(phi);
2589                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2590                 if (!mem_is_valid(m, u)) {
2591                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2592                   _memory_nodes.map(u->_idx, phi);
2593                 } else {
2594                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2595                   for (;;) {
2596                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2597                     Node* next = NULL;
2598                     if (m->is_Proj()) {
2599                       next = m->in(0);
2600                     } else {
2601                       assert(m->is_Mem() || m->is_LoadStore(), "");
2602                       assert(_alias == Compile::AliasIdxRaw, "");
2603                       next = m->in(MemNode::Memory);
2604                     }
2605                     if (_phase->get_ctrl(next) != u) {
2606                       break;
2607                     }
2608                     if (next->is_MergeMem()) {
2609                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2610                       break;
2611                     }
2612                     if (next->is_Phi()) {
2613                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2614                       break;
2615                     }
2616                     m = next;
2617                   }
2618 
2619                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2620                   assert(m->is_Mem() || m->is_LoadStore(), "");
2621                   uint input = (uint)MemNode::Memory;
2622                   _phase->igvn().replace_input_of(m, input, phi);
2623                   push = false;
2624                 }
2625               } else {
2626                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2627               }
2628               if (push) {
2629                 uses.push(u);
2630               }
2631             }
2632           } else if (!mem_is_valid(m, u) &&
2633                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2634             uses.push(u);
2635           }
2636         }
2637       }
2638     }
2639     for (int i = 0; i < phis.length(); i++) {
2640       Node* n = phis.at(i);
2641       Node* r = n->in(0);
2642       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2643       for (uint j = 1; j < n->req(); j++) {
2644         Node* m = find_mem(r->in(j), NULL);
2645         _phase->igvn().replace_input_of(n, j, m);
2646         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2647       }
2648     }
2649   }
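       // Re-point the pre-existing uses of the old memory state (loads, MergeMems,
       // memory Phis and wide-memory users such as calls) on this alias slice to
       // the memory state that is live at their control.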
2650   uint last = _phase->C->unique();
2651   MergeMemNode* mm = NULL;
2652   int alias = _alias;
2653   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2654   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2655     Node* u = mem->out(i);
2656     if (u->_idx < last) {
2657       if (u->is_Mem()) {
2658         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2659           Node* m = find_mem(_phase->get_ctrl(u), u);
2660           if (m != mem) {
2661             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2662             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2663             --i;
2664           }
2665         }
2666       } else if (u->is_MergeMem()) {
2667         MergeMemNode* u_mm = u->as_MergeMem();
2668         if (u_mm->memory_at(alias) == mem) {
2669           MergeMemNode* newmm = NULL;
2670           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2671             Node* uu = u->fast_out(j);
2672             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2673             if (uu->is_Phi()) {
2674               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2675               Node* region = uu->in(0);
2676               int nb = 0;
2677               for (uint k = 1; k < uu->req(); k++) {
2678                 if (uu->in(k) == u) {
2679                   Node* m = find_mem(region->in(k), NULL);
2680                   if (m != mem) {
2681                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2682                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2683                     if (newmm != u) {
2684                       _phase->igvn().replace_input_of(uu, k, newmm);
2685                       nb++;
2686                       --jmax;
2687                     }
2688                   }
2689                 }
2690               }
2691               if (nb > 0) {
2692                 --j;
2693               }
2694             } else {
2695               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2696               if (m != mem) {
2697                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2698                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2699                 if (newmm != u) {
2700                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2701                   --j, --jmax;
2702                 }
2703               }
2704             }
2705           }
2706         }
2707       } else if (u->is_Phi()) {
2708         assert(u->bottom_type() == Type::MEMORY, "what else?");
2709         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2710           Node* region = u->in(0);
2711           bool replaced = false;
2712           for (uint j = 1; j < u->req(); j++) {
2713             if (u->in(j) == mem) {
2714               Node* m = find_mem(region->in(j), NULL);
2715               Node* nnew = m;
2716               if (m != mem) {
2717                 if (u->adr_type() == TypePtr::BOTTOM) {
2718                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2719                   nnew = mm;
2720                 }
2721                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2722                 _phase->igvn().replace_input_of(u, j, nnew);
2723                 replaced = true;
2724               }
2725             }
2726           }
2727           if (replaced) {
2728             --i;
2729           }
2730         }
2731       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2732                  u->adr_type() == NULL) {
2733         assert(u->adr_type() != NULL ||
2734                u->Opcode() == Op_Rethrow ||
2735                u->Opcode() == Op_Return ||
2736                u->Opcode() == Op_SafePoint ||
2737                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2738                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2739                u->Opcode() == Op_CallLeaf, "");
2740         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2741         if (m != mem) {
2742           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2743           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2744           --i;
2745         }
2746       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2747         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2748         if (m != mem) {
2749           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2750           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2751           --i;
2752         }
2753       } else if (u->adr_type() != TypePtr::BOTTOM &&
2754                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2755         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2756         assert(m != mem, "");
2757         // u is on the wrong slice...
2758         assert(u->is_ClearArray(), "");
2759         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2760         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2761         --i;
2762       }
2763     }
2764   }
2765 #ifdef ASSERT
2766   assert(new_mem->outcnt() > 0, "");
2767   for (int i = 0; i < phis.length(); i++) {
2768     Node* n = phis.at(i);
2769     assert(n->outcnt() > 0, "new phi must have uses now");
2770   }
2771 #endif
2772 }
2773 
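     // Wrap mem in a MergeMem whose slice for _alias reads rep_proj; every other
     // slice falls through to mem. The new node is registered at rep_ctrl.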
2774 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2775   MergeMemNode* mm = MergeMemNode::make(mem);
2776   mm->set_memory_at(_alias, rep_proj);
2777   _phase->register_new_node(mm, rep_ctrl);
2778   return mm;
2779 }
2780 
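     // Return a MergeMem equivalent to u with the _alias slice redirected to
     // rep_proj. When u has a single use it is updated in place and re-homed to
     // the dominating control; otherwise a copy is built (see the DUIterator note
     // below).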
2781 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2782   MergeMemNode* newmm = NULL;
2783   MergeMemNode* u_mm = u->as_MergeMem();
2784   Node* c = _phase->get_ctrl(u);
2785   if (_phase->is_dominator(c, rep_ctrl)) {
2786     c = rep_ctrl;
2787   } else {
2788     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2789   }
2790   if (u->outcnt() == 1) {
2791     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2792       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2793       --i;
2794     } else {
2795       _phase->igvn().rehash_node_delayed(u);
2796       u_mm->set_memory_at(_alias, rep_proj);
2797     }
2798     newmm = u_mm;
2799     _phase->set_ctrl_and_loop(u, c);
2800   } else {
2801     // We can't simply clone u and then change one of its inputs, because
2802     // that would add and then remove an edge, which messes with the
2803     // DUIterator.
2804     newmm = MergeMemNode::make(u_mm->base_memory());
2805     for (uint j = 0; j < u->req(); j++) {
2806       if (j < newmm->req()) {
2807         if (j == (uint)_alias) {
2808           newmm->set_req(j, rep_proj);
2809         } else if (newmm->in(j) != u->in(j)) {
2810           newmm->set_req(j, u->in(j));
2811         }
2812       } else if (j == (uint)_alias) {
2813         newmm->add_req(rep_proj);
2814       } else {
2815         newmm->add_req(u->in(j));
2816       }
2817     }
2818     if ((uint)_alias >= u->req()) {
2819       newmm->set_memory_at(_alias, rep_proj);
2820     }
2821     _phase->register_new_node(newmm, c);
2822   }
2823   return newmm;
2824 }
2825 
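     // A memory Phi should be rewired only if it carries this alias slice: either
     // it is on the slice itself, or it is a bottom-memory Phi whose region has no
     // dedicated Phi for the slice.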
2826 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2827   if (phi->adr_type() == TypePtr::BOTTOM) {
2828     Node* region = phi->in(0);
2829     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2830       Node* uu = region->fast_out(j);
2831       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2832         return false;
2833       }
2834     }
2835     return true;
2836   }
2837   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2838 }
2839 
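     // Redirect the memory uses of mem that are dominated by rep_ctrl so they
     // consume rep_proj on this alias slice: MergeMems are cloned or updated in
     // place, and wide (bottom) memory users get a fresh MergeMem wrapping
     // rep_proj.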
2840 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2841   uint last = _phase->C->unique();
2842   MergeMemNode* mm = NULL;
2843   assert(mem->bottom_type() == Type::MEMORY, "");
2844   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2845     Node* u = mem->out(i);
2846     if (u != replacement && u->_idx < last) {
2847       if (u->is_MergeMem()) {
2848         MergeMemNode* u_mm = u->as_MergeMem();
2849         if (u_mm->memory_at(_alias) == mem) {
2850           MergeMemNode* newmm = NULL;
2851           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2852             Node* uu = u->fast_out(j);
2853             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2854             if (uu->is_Phi()) {
2855               if (should_process_phi(uu)) {
2856                 Node* region = uu->in(0);
2857                 int nb = 0;
2858                 for (uint k = 1; k < uu->req(); k++) {
2859                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2860                     if (newmm == NULL) {
2861                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2862                     }
2863                     if (newmm != u) {
2864                       _phase->igvn().replace_input_of(uu, k, newmm);
2865                       nb++;
2866                       --jmax;
2867                     }
2868                   }
2869                 }
2870                 if (nb > 0) {
2871                   --j;
2872                 }
2873               }
2874             } else {
2875               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2876                 if (newmm == NULL) {
2877                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2878                 }
2879                 if (newmm != u) {
2880                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2881                   --j, --jmax;
2882                 }
2883               }
2884             }
2885           }
2886         }
2887       } else if (u->is_Phi()) {
2888         assert(u->bottom_type() == Type::MEMORY, "what else?");
2889         Node* region = u->in(0);
2890         if (should_process_phi(u)) {
2891           bool replaced = false;
2892           for (uint j = 1; j < u->req(); j++) {
2893             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2894               Node* nnew = rep_proj;
2895               if (u->adr_type() == TypePtr::BOTTOM) {
2896                 if (mm == NULL) {
2897                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2898                 }
2899                 nnew = mm;
2900               }
2901               _phase->igvn().replace_input_of(u, j, nnew);
2902               replaced = true;
2903             }
2904           }
2905           if (replaced) {
2906             --i;
2907           }
2908 
2909         }
2910       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2911                  u->adr_type() == NULL) {
2912         assert(u->adr_type() != NULL ||
2913                u->Opcode() == Op_Rethrow ||
2914                u->Opcode() == Op_Return ||
2915                u->Opcode() == Op_SafePoint ||
2916                u->Opcode() == Op_StoreIConditional ||
2917                u->Opcode() == Op_StoreLConditional ||
2918                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2919                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2920                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2921         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2922           if (mm == NULL) {
2923             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2924           }
2925           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2926           --i;
2927         }
2928       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2929         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2930           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2931           --i;
2932         }
2933       }
2934     }
2935   }
2936 }
2937 
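     // The load reference barrier node wraps an oop loaded from the heap;
     // registering it with the barrier set state lets the expansion pass find it
     // later.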
2938 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
2939 : Node(ctrl, obj) {
2940   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2941 }
2942 
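     // The barrier's type follows its value input: TOP and the null pointer pass
     // through unchanged, anything else is typed as the input's oop type.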
2943 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2944   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
2945     return Type::TOP;
2946   }
2947   const Type* t = in(ValueIn)->bottom_type();
2948   if (t == TypePtr::NULL_PTR) {
2949     return t;
2950   }
2951   return t->is_oopptr();
2952 }
2953 
2954 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2955   // If the value input is TOP, the result is TOP.
2956   const Type *t2 = phase->type(in(ValueIn));
2957   if (t2 == Type::TOP) return Type::TOP;
2958 
2959   if (t2 == TypePtr::NULL_PTR) {
2960     return t2;
2961   }
2962 
2963   const Type* type = t2->is_oopptr()/*->cast_to_nonconst()*/;
2964   return type;
2965 }
2966 
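     // The barrier folds away when its value provably never needs one (see
     // needs_barrier_impl below), e.g. freshly allocated objects, nulls and
     // constant oops.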
2967 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2968   Node* value = in(ValueIn);
2969   if (!needs_barrier(phase, value)) {
2970     return value;
2971   }
2972   return this;
2973 }
2974 
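     // Walk the value's defining nodes: allocations, call results, nulls, constant
     // oops, incoming parameters and values already covered by another barrier do
     // not need a barrier; loads and address computations do. Unknown shapes
     // conservatively answer true.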
2975 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2976   Unique_Node_List visited;
2977   return needs_barrier_impl(phase, n, visited);
2978 }
2979 
2980 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2981   if (n == NULL) return false;
2982   if (visited.member(n)) {
2983     return false; // Been there.
2984   }
2985   visited.push(n);
2986 
2987   if (n->is_Allocate()) {
2988     // tty->print_cr("optimize barrier on alloc");
2989     return false;
2990   }
2991   if (n->is_Call()) {
2992     // tty->print_cr("optimize barrier on call");
2993     return false;
2994   }
2995 
2996   const Type* type = phase->type(n);
2997   if (type == Type::TOP) {
2998     return false;
2999   }
3000   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3001     // tty->print_cr("optimize barrier on null");
3002     return false;
3003   }
3004   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3005     // tty->print_cr("optimize barrier on constant");
3006     return false;
3007   }
3008 
3009   switch (n->Opcode()) {
3010     case Op_AddP:
3011       return true; // TODO: Can refine?
3012     case Op_LoadP:
3013     case Op_ShenandoahCompareAndExchangeN:
3014     case Op_ShenandoahCompareAndExchangeP:
3015     case Op_CompareAndExchangeN:
3016     case Op_CompareAndExchangeP:
3017     case Op_GetAndSetN:
3018     case Op_GetAndSetP:
3019       return true;
3020     case Op_Phi: {
3021       for (uint i = 1; i < n->req(); i++) {
3022         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3023       }
3024       return false;
3025     }
3026     case Op_CheckCastPP:
3027     case Op_CastPP:
3028       return needs_barrier_impl(phase, n->in(1), visited);
3029     case Op_Proj:
3030       return needs_barrier_impl(phase, n->in(0), visited);
3031     case Op_ShenandoahLoadReferenceBarrier:
3032       // tty->print_cr("optimize barrier on barrier");
3033       return false;
3034     case Op_Parm:
3035       // tty->print_cr("optimize barrier on input arg");
3036       return false;
3037     case Op_DecodeN:
3038     case Op_EncodeP:
3039       return needs_barrier_impl(phase, n->in(1), visited);
3040     case Op_LoadN:
3041       return true;
3042     case Op_CMoveN:
3043     case Op_CMoveP:
3044       return needs_barrier_impl(phase, n->in(2), visited) ||
3045              needs_barrier_impl(phase, n->in(3), visited);
3046     case Op_ShenandoahEnqueueBarrier:
3047       return needs_barrier_impl(phase, n->in(1), visited);
3048     case Op_CreateEx:
3049       return false;
3050     default:
3051       break;
3052   }
3053 #ifdef ASSERT
3054   tty->print("need barrier on?: ");
3055   tty->print_cr("ins:");
3056   n->dump(2);
3057   tty->print_cr("outs:");
3058   n->dump(-2);
3059   ShouldNotReachHere();
3060 #endif
3061   return true;
3062 }
3063 
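     // Classify the barrier by its transitive users: STRONG as soon as one user is
     // known to require the to-space copy (calls, stores, atomics, most loads,
     // pointer compares against non-null values), NONE if no such user is found.
     // Unknown users default to STRONG in product builds.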
3064 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
3065   Unique_Node_List visited;
3066   Node_Stack stack(0);
3067   stack.push(this, 0);
3068 
3069   // Look for strongest strength: go over nodes looking for STRONG ones.
3070   // Stop once we encounter STRONG. Otherwise, walk until we run out of nodes,
3071   // and then the overall strength is NONE.
3072   Strength strength = NONE;
3073   while (strength != STRONG && stack.size() > 0) {
3074     Node* n = stack.node();
3075     if (visited.member(n)) {
3076       stack.pop();
3077       continue;
3078     }
3079     visited.push(n);
3080     bool visit_users = false;
3081     switch (n->Opcode()) {
3082       case Op_CallStaticJava:
3083       case Op_CallDynamicJava:
3084       case Op_CallLeaf:
3085       case Op_CallLeafNoFP:
3086       case Op_CompareAndSwapL:
3087       case Op_CompareAndSwapI:
3088       case Op_CompareAndSwapB:
3089       case Op_CompareAndSwapS:
3090       case Op_CompareAndSwapN:
3091       case Op_CompareAndSwapP:
3092       case Op_CompareAndExchangeL:
3093       case Op_CompareAndExchangeI:
3094       case Op_CompareAndExchangeB:
3095       case Op_CompareAndExchangeS:
3096       case Op_CompareAndExchangeN:
3097       case Op_CompareAndExchangeP:
3098       case Op_WeakCompareAndSwapL:
3099       case Op_WeakCompareAndSwapI:
3100       case Op_WeakCompareAndSwapB:
3101       case Op_WeakCompareAndSwapS:
3102       case Op_WeakCompareAndSwapN:
3103       case Op_WeakCompareAndSwapP:
3104       case Op_ShenandoahCompareAndSwapN:
3105       case Op_ShenandoahCompareAndSwapP:
3106       case Op_ShenandoahWeakCompareAndSwapN:
3107       case Op_ShenandoahWeakCompareAndSwapP:
3108       case Op_ShenandoahCompareAndExchangeN:
3109       case Op_ShenandoahCompareAndExchangeP:
3110       case Op_GetAndSetL:
3111       case Op_GetAndSetI:
3112       case Op_GetAndSetB:
3113       case Op_GetAndSetS:
3114       case Op_GetAndSetP:
3115       case Op_GetAndSetN:
3116       case Op_GetAndAddL:
3117       case Op_GetAndAddI:
3118       case Op_GetAndAddB:
3119       case Op_GetAndAddS:
3120       case Op_ShenandoahEnqueueBarrier:
3121       case Op_FastLock:
3122       case Op_FastUnlock:
3123       case Op_Rethrow:
3124       case Op_Return:
3125       case Op_StoreB:
3126       case Op_StoreC:
3127       case Op_StoreD:
3128       case Op_StoreF:
3129       case Op_StoreL:
3130       case Op_StoreLConditional:
3131       case Op_StoreI:
3132       case Op_StoreIConditional:
3133       case Op_StoreN:
3134       case Op_StoreP:
3135       case Op_StoreVector:
3136       case Op_StrInflatedCopy:
3137       case Op_StrCompressedCopy:
3138       case Op_EncodeP:
3139       case Op_CastP2X:
3140       case Op_SafePoint:
3141       case Op_EncodeISOArray:
3142       case Op_AryEq:
3143       case Op_StrEquals:
3144       case Op_StrComp:
3145       case Op_StrIndexOf:
3146       case Op_StrIndexOfChar:
3147       case Op_HasNegatives:
3148         // Known to require barriers
3149         strength = STRONG;
3150         break;
3151       case Op_CmpP: {
3152         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3153             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3154           // One of the sides is known null, no need for barrier.
3155         } else {
3156           strength = STRONG;
3157         }
3158         break;
3159       }
3160       case Op_LoadB:
3161       case Op_LoadUB:
3162       case Op_LoadUS:
3163       case Op_LoadD:
3164       case Op_LoadF:
3165       case Op_LoadL:
3166       case Op_LoadI:
3167       case Op_LoadS:
3168       case Op_LoadN:
3169       case Op_LoadP:
3170       case Op_LoadVector: {
3171         const TypePtr* adr_type = n->adr_type();
3172         int alias_idx = Compile::current()->get_alias_index(adr_type);
3173         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3174         ciField* field = alias_type->field();
3175         bool is_static = field != NULL && field->is_static();
3176         bool is_final = field != NULL && field->is_final();
3177 
3178         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3179           // Loading the constant does not require barriers: it should be handled
3180           // as part of GC roots already.
3181         } else {
3182           strength = STRONG;
3183         }
3184         break;
3185       }
3186       case Op_Conv2B:
3187       case Op_LoadRange:
3188       case Op_LoadKlass:
3189       case Op_LoadNKlass:
3190         // Do not require barriers
3191         break;
3192       case Op_AddP:
3193       case Op_CheckCastPP:
3194       case Op_CastPP:
3195       case Op_CMoveP:
3196       case Op_Phi:
3197       case Op_ShenandoahLoadReferenceBarrier:
3198         // Whether these need barriers depends on their users
3199         visit_users = true;
3200         break;
3201       default: {
3202 #ifdef ASSERT
3203         fatal("Unknown node in get_barrier_strength: %s", NodeClassNames[n->Opcode()]);
3204 #else
3205         // Default to strong: better to have excess barriers than to miss some.
3206         strength = STRONG;
3207 #endif
3208       }
3209     }
3210 
3211     stack.pop();
3212     if (visit_users) {
3213       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3214         Node* user = n->fast_out(i);
3215         if (user != NULL) {
3216           stack.push(user, 0);
3217         }
3218       }
3219     }
3220   }
3221   return strength;
3222 }
3223 
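     // Match an explicit null check that deoptimizes on null: the barrier's value
     // is the non-null CastPP hanging off the IfTrue projection of a
     // CmpP-against-NULL test whose failing (null) path leads to an uncommon trap.
     // Return that trap call, or NULL if the shape is not recognized.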
3224 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3225   Node* val = in(ValueIn);
3226 
3227   const Type* val_t = igvn.type(val);
3228 
3229   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3230       val->Opcode() == Op_CastPP &&
3231       val->in(0) != NULL &&
3232       val->in(0)->Opcode() == Op_IfTrue &&
3233       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3234       val->in(0)->in(0)->is_If() &&
3235       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3236       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3237       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3238       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3239       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3240     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3241     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3242     return unc;
3243   }
3244   return NULL;
3245 }