1 /*
   2  * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "classfile/javaClasses.hpp"
  28 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  29 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  31 #include "gc/shenandoah/shenandoahForwarding.hpp"
  32 #include "gc/shenandoah/shenandoahHeap.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  34 #include "gc/shenandoah/shenandoahRuntime.hpp"
  35 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  36 #include "opto/arraycopynode.hpp"
  37 #include "opto/block.hpp"
  38 #include "opto/callnode.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "opto/subnode.hpp"
  45 
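     // Expands IU and load reference barriers after the regular loop optimizations:
     // runs the dedicated LoopOptsShenandoahExpand pass and, if requested, one more
     // round of loop opts over the expanded shape.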
  46 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  47   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  48   if ((state->iu_barriers_count() +
  49        state->load_reference_barriers_count()) > 0) {
  50     assert(C->post_loop_opts_phase(), "no loop opts allowed");
  51     C->reset_post_loop_opts_phase(); // ... but we know what we are doing
  52     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  53     C->clear_major_progress();
  54     PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
  55     if (C->failing()) return false;
  56     PhaseIdealLoop::verify(igvn);
  57     DEBUG_ONLY(verify_raw_mem(C->root());)
  58     if (attempt_more_loopopts) {
  59       C->set_major_progress();
  60       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  61         return false;
  62       }
  63       C->clear_major_progress();
  64 
  65       C->process_for_post_loop_opts_igvn(igvn);
  66     }
  67     C->set_post_loop_opts_phase(); // now for real!
  68   }
  69   return true;
  70 }
  71 
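     // Recognizes the branch shape emitted by test_gc_state() below:
     //   If (Bool#ne (CmpI (AndI (<gc-state load>, mask), 0)))
     // i.e. a test of "(gc_state & mask) != 0" on the thread-local gc state byte.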
  72 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  73   if (!UseShenandoahGC) {
  74     return false;
  75   }
  76   assert(iff->is_If(), "bad input");
  77   if (iff->Opcode() != Op_If) {
  78     return false;
  79   }
  80   Node* bol = iff->in(1);
  81   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  82     return false;
  83   }
  84   Node* cmp = bol->in(1);
  85   if (cmp->Opcode() != Op_CmpI) {
  86     return false;
  87   }
  88   Node* in1 = cmp->in(1);
  89   Node* in2 = cmp->in(2);
  90   if (in2->find_int_con(-1) != 0) {
  91     return false;
  92   }
  93   if (in1->Opcode() != Op_AndI) {
  94     return false;
  95   }
  96   in2 = in1->in(2);
  97   if (in2->find_int_con(-1) != mask) {
  98     return false;
  99   }
 100   in1 = in1->in(1);
 101 
 102   return is_gc_state_load(in1);
 103 }
 104 
 105 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 106   return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 107 }
 108 
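     // Matches a LoadB/LoadUB from AddP(ThreadLocal, gc_state_offset), i.e. a load
     // of the thread-local ShenandoahThreadLocalData::gc_state() byte.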
 109 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 110   if (!UseShenandoahGC) {
 111     return false;
 112   }
 113   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 114     return false;
 115   }
 116   Node* addp = n->in(MemNode::Address);
 117   if (!addp->is_AddP()) {
 118     return false;
 119   }
 120   Node* base = addp->in(AddPNode::Address);
 121   Node* off = addp->in(AddPNode::Offset);
 122   if (base->Opcode() != Op_ThreadLocal) {
 123     return false;
 124   }
 125   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 126     return false;
 127   }
 128   return true;
 129 }
 130 
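     // Walks control edges backwards from start towards stop and reports whether a
     // safepoint (other than a leaf call) may be crossed on some path.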
 131 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 132   assert(phase->is_dominator(stop, start), "bad inputs");
 133   ResourceMark rm;
 134   Unique_Node_List wq;
 135   wq.push(start);
 136   for (uint next = 0; next < wq.size(); next++) {
 137     Node *m = wq.at(next);
 138     if (m == stop) {
 139       continue;
 140     }
 141     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 142       return true;
 143     }
 144     if (m->is_Region()) {
 145       for (uint i = 1; i < m->req(); i++) {
 146         wq.push(m->in(i));
 147       }
 148     } else {
 149       wq.push(m->in(0));
 150     }
 151   }
 152   return false;
 153 }
 154 
 155 #ifdef ASSERT
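     // Chases the definition of 'in' through casts, AddPs, Phis and CMoves and checks
     // that every oop value reaching this use is covered by a load reference barrier
     // (and, for oop stores, by an IU barrier). Returns false on the first miss.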
 156 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 157   assert(phis.size() == 0, "");
 158 
 159   while (true) {
 160     if (in->bottom_type() == TypePtr::NULL_PTR) {
 161       if (trace) {tty->print_cr("NULL");}
 162     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 163       if (trace) {tty->print_cr("Non oop");}
 164     } else {
 165       if (in->is_ConstraintCast()) {
 166         in = in->in(1);
 167         continue;
 168       } else if (in->is_AddP()) {
 169         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 170         in = in->in(AddPNode::Address);
 171         continue;
 172       } else if (in->is_Con()) {
 173         if (trace) {
 174           tty->print("Found constant");
 175           in->dump();
 176         }
 177       } else if (in->Opcode() == Op_Parm) {
 178         if (trace) {
 179           tty->print("Found argument");
 180         }
 181       } else if (in->Opcode() == Op_CreateEx) {
 182         if (trace) {
 183           tty->print("Found create-exception");
 184         }
 185       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 186         if (trace) {
 187           tty->print("Found raw LoadP (OSR argument?)");
 188         }
 189       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 190         if (t == ShenandoahOopStore) {
 191           uint i = 0;
 192           for (; i < phis.size(); i++) {
 193             Node* n = phis.node_at(i);
 194             if (n->Opcode() == Op_ShenandoahIUBarrier) {
 195               break;
 196             }
 197           }
 198           if (i == phis.size()) {
 199             return false;
 200           }
 201         }
 202         barriers_used.push(in);
 203         if (trace) {tty->print("Found barrier"); in->dump();}
 204       } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
 205         if (t != ShenandoahOopStore) {
 206           in = in->in(1);
 207           continue;
 208         }
 209         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 210         phis.push(in, in->req());
 211         in = in->in(1);
 212         continue;
 213       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 214         if (trace) {
 215           tty->print("Found alloc");
 216           in->in(0)->dump();
 217         }
 218       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 219         if (trace) {
 220           tty->print("Found Java call");
 221         }
 222       } else if (in->is_Phi()) {
 223         if (!visited.test_set(in->_idx)) {
 224           if (trace) {tty->print("Pushed phi:"); in->dump();}
 225           phis.push(in, 2);
 226           in = in->in(1);
 227           continue;
 228         }
 229         if (trace) {tty->print("Already seen phi:"); in->dump();}
 230       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 231         if (!visited.test_set(in->_idx)) {
 232           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 233           phis.push(in, CMoveNode::IfTrue);
 234           in = in->in(CMoveNode::IfFalse);
 235           continue;
 236         }
 237         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 238       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 239         in = in->in(1);
 240         continue;
 241       } else {
 242         return false;
 243       }
 244     }
 245     bool cont = false;
 246     while (phis.is_nonempty()) {
 247       uint idx = phis.index();
 248       Node* phi = phis.node();
 249       if (idx >= phi->req()) {
 250         if (trace) {tty->print("Popped phi:"); phi->dump();}
 251         phis.pop();
 252         continue;
 253       }
 254       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 255       in = phi->in(idx);
 256       phis.set_index(idx+1);
 257       cont = true;
 258       break;
 259     }
 260     if (!cont) {
 261       break;
 262     }
 263   }
 264   return true;
 265 }
 266 
 267 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 268   if (n1 != NULL) {
 269     n1->dump(+10);
 270   }
 271   if (n2 != NULL) {
 272     n2->dump(+10);
 273   }
 274   fatal("%s", msg);
 275 }
 276 
 277 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 278   ResourceMark rm;
 279   Unique_Node_List wq;
 280   GrowableArray<Node*> barriers;
 281   Unique_Node_List barriers_used;
 282   Node_Stack phis(0);
 283   VectorSet visited;
 284   const bool trace = false;
 285   const bool verify_no_useless_barrier = false;
 286 
 287   wq.push(root);
 288   for (uint next = 0; next < wq.size(); next++) {
 289     Node *n = wq.at(next);
 290     if (n->is_Load()) {
 291       const bool trace = false;
 292       if (trace) {tty->print("Verifying"); n->dump();}
 293       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 294         if (trace) {tty->print_cr("Load range/klass");}
 295       } else {
 296         const TypePtr* adr_type = n->as_Load()->adr_type();
 297 
 298         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 299           if (trace) {tty->print_cr("Mark load");}
 300         } else if (adr_type->isa_instptr() &&
 301                    adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 302                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
 303           if (trace) {tty->print_cr("Reference.get()");}
 304         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 305           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 306         }
 307       }
 308     } else if (n->is_Store()) {
 309       const bool trace = false;
 310 
 311       if (trace) {tty->print("Verifying"); n->dump();}
 312       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 313         Node* adr = n->in(MemNode::Address);
 314         bool verify = true;
 315 
 316         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 317           adr = adr->in(AddPNode::Address);
 318           if (adr->is_AddP()) {
 319             assert(adr->in(AddPNode::Base)->is_top(), "");
 320             adr = adr->in(AddPNode::Address);
 321             if (adr->Opcode() == Op_LoadP &&
 322                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 323                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 324                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 325               if (trace) {tty->print_cr("SATB prebarrier");}
 326               verify = false;
 327             }
 328           }
 329         }
 330 
 331         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 332           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 333         }
 334       }
 335       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 336         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 337       }
 338     } else if (n->Opcode() == Op_CmpP) {
 339       const bool trace = false;
 340 
 341       Node* in1 = n->in(1);
 342       Node* in2 = n->in(2);
 343       if (in1->bottom_type()->isa_oopptr()) {
 344         if (trace) {tty->print("Verifying"); n->dump();}
 345 
 346         bool mark_inputs = false;
 347         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 348             (in1->is_Con() || in2->is_Con())) {
 349           if (trace) {tty->print_cr("Comparison against a constant");}
 350           mark_inputs = true;
 351         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 352                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 353           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 354           mark_inputs = true;
 355         } else {
 356           assert(in2->bottom_type()->isa_oopptr(), "");
 357 
 358           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 359               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 360             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 361           }
 362         }
 363         if (verify_no_useless_barrier &&
 364             mark_inputs &&
 365             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 366              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 367           phis.clear();
 368           visited.reset();
 369         }
 370       }
 371     } else if (n->is_LoadStore()) {
 372       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 373           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 374         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 375       }
 376 
 377       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 378         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 379       }
 380     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 381       CallNode* call = n->as_Call();
 382 
 383       static struct {
 384         const char* name;
 385         struct {
 386           int pos;
 387           verify_type t;
 388         } args[6];
 389       } calls[] = {
 390         "aescrypt_encryptBlock",
 391         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 392           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 393         "aescrypt_decryptBlock",
 394         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 395           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 396         "multiplyToLen",
 397         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 398           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 399         "squareToLen",
 400         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 401           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 402         "montgomery_multiply",
 403         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 404           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 405         "montgomery_square",
 406         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 407           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 408         "mulAdd",
 409         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 410           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 411         "vectorizedMismatch",
 412         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 413           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 414         "updateBytesCRC32",
 415         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 416           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 417         "updateBytesAdler32",
 418         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 419           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 420         "updateBytesCRC32C",
 421         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 422           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 423         "counterMode_AESCrypt",
 424         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 425           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 426         "cipherBlockChaining_encryptAESCrypt",
 427         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 428           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 429         "cipherBlockChaining_decryptAESCrypt",
 430         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 431           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 432         "shenandoah_clone_barrier",
 433         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 434           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 435         "ghash_processBlocks",
 436         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 437           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 438         "sha1_implCompress",
 439         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 440           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 441         "sha256_implCompress",
 442         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 443           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 444         "sha512_implCompress",
 445         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 446           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 447         "sha1_implCompressMB",
 448         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 449           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 450         "sha256_implCompressMB",
 451         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 452           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 453         "sha512_implCompressMB",
 454         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 455           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 456         "encodeBlock",
 457         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 458           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 459         "decodeBlock",
 460         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 461           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 462       };
 463 
 464       if (call->is_call_to_arraycopystub()) {
 465         Node* dest = NULL;
 466         const TypeTuple* args = n->as_Call()->_tf->domain_sig();
 467         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 468           if (args->field_at(i)->isa_ptr()) {
 469             j++;
 470             if (j == 2) {
 471               dest = n->in(i);
 472               break;
 473             }
 474           }
 475         }
 476         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 477             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 478           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 479         }
 480       } else if (strlen(call->_name) > 5 &&
 481                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 482         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 483           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 484         }
 485       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 486         // skip
 487       } else {
 488         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 489         int i = 0;
 490         for (; i < calls_len; i++) {
 491           if (!strcmp(calls[i].name, call->_name)) {
 492             break;
 493           }
 494         }
 495         if (i != calls_len) {
 496           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 497           for (uint j = 0; j < args_len; j++) {
 498             int pos = calls[i].args[j].pos;
 499             if (pos == -1) {
 500               break;
 501             }
 502             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 503               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 504             }
 505           }
 506           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 507             if (call->in(j)->bottom_type()->make_ptr() &&
 508                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 509               uint k = 0;
 510               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 511               if (k == args_len) {
 512                 fatal("arg %d for call %s not covered", j, call->_name);
 513               }
 514             }
 515           }
 516         } else {
 517           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 518             if (call->in(j)->bottom_type()->make_ptr() &&
 519                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 520               fatal("%s not covered", call->_name);
 521             }
 522           }
 523         }
 524       }
 525     } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 526       // skip
 527     } else if (n->is_AddP()
 528                || n->is_Phi()
 529                || n->is_ConstraintCast()
 530                || n->Opcode() == Op_Return
 531                || n->Opcode() == Op_CMoveP
 532                || n->Opcode() == Op_CMoveN
 533                || n->Opcode() == Op_Rethrow
 534                || n->is_MemBar()
 535                || n->Opcode() == Op_Conv2B
 536                || n->Opcode() == Op_SafePoint
 537                || n->is_CallJava()
 538                || n->Opcode() == Op_Unlock
 539                || n->Opcode() == Op_EncodeP
 540                || n->Opcode() == Op_DecodeN) {
 541       // nothing to do
 542     } else {
 543       static struct {
 544         int opcode;
 545         struct {
 546           int pos;
 547           verify_type t;
 548         } inputs[2];
 549       } others[] = {
 550         Op_FastLock,
 551         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 552         Op_Lock,
 553         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 554         Op_ArrayCopy,
 555         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 556         Op_StrCompressedCopy,
 557         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 558         Op_StrInflatedCopy,
 559         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 560         Op_AryEq,
 561         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 562         Op_StrIndexOf,
 563         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 564         Op_StrComp,
 565         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 566         Op_StrEquals,
 567         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 568         Op_EncodeISOArray,
 569         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 570         Op_HasNegatives,
 571         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 572         Op_CastP2X,
 573         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 574         Op_StrIndexOfChar,
 575         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 576       };
 577 
 578       const int others_len = sizeof(others) / sizeof(others[0]);
 579       int i = 0;
 580       for (; i < others_len; i++) {
 581         if (others[i].opcode == n->Opcode()) {
 582           break;
 583         }
 584       }
 585       uint stop = n->is_Call() ? n->as_Call()->tf()->domain_sig()->cnt() : n->req();
 586       if (i != others_len) {
 587         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 588         for (uint j = 0; j < inputs_len; j++) {
 589           int pos = others[i].inputs[j].pos;
 590           if (pos == -1) {
 591             break;
 592           }
 593           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 594             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 595           }
 596         }
 597         for (uint j = 1; j < stop; j++) {
 598           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 599               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 600             uint k = 0;
 601             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 602             if (k == inputs_len) {
 603               fatal("arg %d for node %s not covered", j, n->Name());
 604             }
 605           }
 606         }
 607       } else {
 608         for (uint j = 1; j < stop; j++) {
 609           if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
 610               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 611             fatal("%s not covered", n->Name());
 612           }
 613         }
 614       }
 615     }
 616 
 617     if (n->is_SafePoint()) {
 618       SafePointNode* sfpt = n->as_SafePoint();
 619       if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
 620         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 621           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 622             phis.clear();
 623             visited.reset();
 624           }
 625         }
 626       }
 627     }
 628   }
 629 
 630   if (verify_no_useless_barrier) {
 631     for (int i = 0; i < barriers.length(); i++) {
 632       Node* n = barriers.at(i);
 633       if (!barriers_used.member(n)) {
 634         tty->print("XXX useless barrier"); n->dump(-2);
 635         ShouldNotReachHere();
 636       }
 637     }
 638   }
 639 }
 640 #endif
 641 
 642 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 643   // That both nodes have the same control is not sufficient to prove
 644   // domination; verify that there's no path from d to n
 645   ResourceMark rm;
 646   Unique_Node_List wq;
 647   wq.push(d);
 648   for (uint next = 0; next < wq.size(); next++) {
 649     Node *m = wq.at(next);
 650     if (m == n) {
 651       return false;
 652     }
 653     if (m->is_Phi() && m->in(0)->is_Loop()) {
 654       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 655     } else {
 656       if (m->is_Store() || m->is_LoadStore()) {
 657         // Take anti-dependencies into account
 658         Node* mem = m->in(MemNode::Memory);
 659         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 660           Node* u = mem->fast_out(i);
 661           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 662               phase->ctrl_or_self(u) == c) {
 663             wq.push(u);
 664           }
 665         }
 666       }
 667       for (uint i = 0; i < m->req(); i++) {
 668         if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
 669           wq.push(m->in(i));
 670         }
 671       }
 672     }
 673   }
 674   return true;
 675 }
 676 
 677 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 678   if (d_c != n_c) {
 679     return phase->is_dominator(d_c, n_c);
 680   }
 681   return is_dominator_same_ctrl(d_c, d, n, phase);
 682 }
 683 
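     // Steps from a memory state to its input memory state along the given alias.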
 684 Node* next_mem(Node* mem, int alias) {
 685   Node* res = NULL;
 686   if (mem->is_Proj()) {
 687     res = mem->in(0);
 688   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 689     res = mem->in(TypeFunc::Memory);
 690   } else if (mem->is_Phi()) {
 691     res = mem->in(1);
 692   } else if (mem->is_MergeMem()) {
 693     res = mem->as_MergeMem()->memory_at(alias);
 694   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 695     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 696     res = mem->in(MemNode::Memory);
 697   } else {
 698 #ifdef ASSERT
 699     mem->dump();
 700 #endif
 701     ShouldNotReachHere();
 702   }
 703   return res;
 704 }
 705 
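     // Walks the idom chain from c up to dom. Returns NULL if control is branch free,
     // the single non-trap If projection if allow_one_proj permits one, and
     // NodeSentinel if an unsupported control flow shape is encountered.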
 706 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 707   Node* iffproj = NULL;
 708   while (c != dom) {
 709     Node* next = phase->idom(c);
 710     assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 711     if (c->is_Region()) {
 712       ResourceMark rm;
 713       Unique_Node_List wq;
 714       wq.push(c);
 715       for (uint i = 0; i < wq.size(); i++) {
 716         Node *n = wq.at(i);
 717         if (n == next) {
 718           continue;
 719         }
 720         if (n->is_Region()) {
 721           for (uint j = 1; j < n->req(); j++) {
 722             wq.push(n->in(j));
 723           }
 724         } else {
 725           wq.push(n->in(0));
 726         }
 727       }
 728       for (uint i = 0; i < wq.size(); i++) {
 729         Node *n = wq.at(i);
 730         assert(n->is_CFG(), "");
 731         if (n->is_Multi()) {
 732           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 733             Node* u = n->fast_out(j);
 734             if (u->is_CFG()) {
 735               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 736                 return NodeSentinel;
 737               }
 738             }
 739           }
 740         }
 741       }
 742     } else  if (c->is_Proj()) {
 743       if (c->is_IfProj()) {
 744         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
 745           // continue;
 746         } else {
 747           if (!allow_one_proj) {
 748             return NodeSentinel;
 749           }
 750           if (iffproj == NULL) {
 751             iffproj = c;
 752           } else {
 753             return NodeSentinel;
 754           }
 755         }
 756       } else if (c->Opcode() == Op_JumpProj) {
 757         return NodeSentinel; // unsupported
 758       } else if (c->Opcode() == Op_CatchProj) {
 759         return NodeSentinel; // unsupported
 760       } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
 761         return NodeSentinel; // unsupported
 762       } else {
 763         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 764       }
 765     }
 766     c = next;
 767   }
 768   return iffproj;
 769 }
 770 
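     // Follows the memory graph up from mem until reaching a state whose control
     // strictly dominates ctrl; returns NULL if the walk loops back on itself.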
 771 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 772   ResourceMark rm;
 773   VectorSet wq;
 774   wq.set(mem->_idx);
 775   mem_ctrl = phase->ctrl_or_self(mem);
 776   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 777     mem = next_mem(mem, alias);
 778     if (wq.test_set(mem->_idx)) {
 779       return NULL;
 780     }
 781     mem_ctrl = phase->ctrl_or_self(mem);
 782   }
 783   if (mem->is_MergeMem()) {
 784     mem = mem->as_MergeMem()->memory_at(alias);
 785     mem_ctrl = phase->ctrl_or_self(mem);
 786   }
 787   return mem;
 788 }
 789 
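     // Walks up the dominator tree from ctrl looking for the nearest memory state
     // with a TypePtr::BOTTOM address type (bottom memory Phi or projection).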
 790 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 791   Node* mem = NULL;
 792   Node* c = ctrl;
 793   do {
 794     if (c->is_Region()) {
 795       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
 796         Node* u = c->fast_out(i);
 797         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 798           if (u->adr_type() == TypePtr::BOTTOM) {
 799             mem = u;
 800           }
 801         }
 802       }
 803     } else {
 804       if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
 805         CallProjections* projs = c->as_Call()->extract_projections(true, false);
 806         if (projs->fallthrough_memproj != NULL) {
 807           if (projs->fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 808             if (projs->catchall_memproj == NULL) {
 809               mem = projs->fallthrough_memproj;
 810             } else {
 811               if (phase->is_dominator(projs->fallthrough_catchproj, ctrl)) {
 812                 mem = projs->fallthrough_memproj;
 813               } else {
 814                 assert(phase->is_dominator(projs->catchall_catchproj, ctrl), "one proj must dominate barrier");
 815                 mem = projs->catchall_memproj;
 816               }
 817             }
 818           }
 819         } else {
 820           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 821           if (proj != NULL &&
 822               proj->adr_type() == TypePtr::BOTTOM) {
 823             mem = proj;
 824           }
 825         }
 826       } else {
 827         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 828           Node* u = c->fast_out(i);
 829           if (u->is_Proj() &&
 830               u->bottom_type() == Type::MEMORY &&
 831               u->adr_type() == TypePtr::BOTTOM) {
 832               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 833               assert(mem == NULL, "only one proj");
 834               mem = u;
 835           }
 836         }
 837         assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
 838       }
 839     }
 840     c = phase->idom(c);
 841   } while (mem == NULL);
 842   return mem;
 843 }
 844 
 845 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 846   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 847     Node* u = n->fast_out(i);
 848     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 849       uses.push(u);
 850     }
 851   }
 852 }
 853 
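     // Replaces the outer strip mined loop head and its loop end with a plain
     // LoopNode/IfNode so the nest is no longer treated as strip mined.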
 854 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 855   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 856   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 857   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 858   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 859   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 860   phase->lazy_replace(outer, new_outer);
 861   phase->lazy_replace(le, new_le);
 862   inner->clear_strip_mined();
 863 }
 864 
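     // Emits the thread-local gc-state test, roughly:
     //   if ((thread->gc_state() & flags) != 0) goto ctrl; else goto test_fail_ctrl;
     // On return, ctrl is the (unlikely) in-GC path and test_fail_ctrl the fast path.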
 865 void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
 866                                                PhaseIdealLoop* phase, int flags) {
 867   PhaseIterGVN& igvn = phase->igvn();
 868   Node* old_ctrl = ctrl;
 869 
 870   Node* thread          = new ThreadLocalNode();
 871   Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 872   Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
 873   Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
 874                                         DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
 875                                         TypeInt::BYTE, MemNode::unordered);
 876   Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
 877   Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
 878   Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);
 879 
 880   IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 881   ctrl                  = new IfTrueNode(gc_state_iff);
 882   test_fail_ctrl        = new IfFalseNode(gc_state_iff);
 883 
 884   IdealLoopTree* loop = phase->get_loop(old_ctrl);
 885   phase->register_control(gc_state_iff,   loop, old_ctrl);
 886   phase->register_control(ctrl,           loop, gc_state_iff);
 887   phase->register_control(test_fail_ctrl, loop, gc_state_iff);
 888 
 889   phase->register_new_node(thread,        old_ctrl);
 890   phase->register_new_node(gc_state_addr, old_ctrl);
 891   phase->register_new_node(gc_state,      old_ctrl);
 892   phase->register_new_node(gc_state_and,  old_ctrl);
 893   phase->register_new_node(gc_state_cmp,  old_ctrl);
 894   phase->register_new_node(gc_state_bool, old_ctrl);
 895 
 896   phase->set_ctrl(gc_state_offset, phase->C->root());
 897 
 898   assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
 899 }
 900 
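     // Emits a null check for val when its type may include NULL; on return ctrl is
     // the not-null path and null_ctrl the null path. No-op for non-null values.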
 901 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 902   Node* old_ctrl = ctrl;
 903   PhaseIterGVN& igvn = phase->igvn();
 904 
 905   const Type* val_t = igvn.type(val);
 906   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 907     Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
 908     Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);
 909 
 910     IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 911     ctrl             = new IfTrueNode(null_iff);
 912     null_ctrl        = new IfFalseNode(null_iff);
 913 
 914     IdealLoopTree* loop = phase->get_loop(old_ctrl);
 915     phase->register_control(null_iff,  loop, old_ctrl);
 916     phase->register_control(ctrl,      loop, null_iff);
 917     phase->register_control(null_ctrl, loop, null_iff);
 918 
 919     phase->register_new_node(null_cmp,  old_ctrl);
 920     phase->register_new_node(null_test, old_ctrl);
 921   }
 922 }
 923 
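     // Emits the collection set membership test: indexes the in_cset_fast_test table
     // with (addr >> region_size_bytes_shift) using raw pointer math and branches on
     // a non-zero byte. ctrl becomes the in-cset path, not_cset_ctrl the fast path.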
 924 void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 925   Node* old_ctrl = ctrl;
 926   PhaseIterGVN& igvn = phase->igvn();
 927 
 928   Node* raw_val        = new CastP2XNode(old_ctrl, val);
 929   Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 930 
 931   // Figure out the target cset address with raw pointer math.
 932   // This avoids matching AddP+LoadB that would emit inefficient code.
 933   // See JDK-8245465.
 934   Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 935   Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
 936   Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
 937   Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);
 938 
 939   Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
 940                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
 941                                        TypeInt::BYTE, MemNode::unordered);
 942   Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
 943   Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);
 944 
 945   IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 946   ctrl                 = new IfTrueNode(cset_iff);
 947   not_cset_ctrl        = new IfFalseNode(cset_iff);
 948 
 949   IdealLoopTree *loop = phase->get_loop(old_ctrl);
 950   phase->register_control(cset_iff,      loop, old_ctrl);
 951   phase->register_control(ctrl,          loop, cset_iff);
 952   phase->register_control(not_cset_ctrl, loop, cset_iff);
 953 
 954   phase->set_ctrl(cset_addr_ptr, phase->C->root());
 955 
 956   phase->register_new_node(raw_val,        old_ctrl);
 957   phase->register_new_node(cset_idx,       old_ctrl);
 958   phase->register_new_node(cset_addr,      old_ctrl);
 959   phase->register_new_node(cset_load_addr, old_ctrl);
 960   phase->register_new_node(cset_load_ptr,  old_ctrl);
 961   phase->register_new_node(cset_load,      old_ctrl);
 962   phase->register_new_node(cset_cmp,       old_ctrl);
 963   phase->register_new_node(cset_bool,      old_ctrl);
 964 }
 965 
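     // Emits the load reference barrier slow path call: selects the runtime entry
     // from the access strength and narrowness encoded in the decorators, wires up
     // control, raw memory and the (val, load_addr) arguments, and replaces val with
     // the type-casted call result.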
 966 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem,
 967                                                DecoratorSet decorators, PhaseIdealLoop* phase) {
 968   IdealLoopTree*loop = phase->get_loop(ctrl);
 969   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
 970 
 971   // The slow path stub consumes and produces raw memory in addition
 972   // to the existing memory edges
 973   Node* base = find_bottom_mem(ctrl, phase);
 974   MergeMemNode* mm = MergeMemNode::make(base);
 975   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
 976   phase->register_new_node(mm, ctrl);
 977 
 978   address calladdr = NULL;
 979   const char* name = NULL;
 980   bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
 981   bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
 982   bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
 983   bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
 984   bool is_narrow  = UseCompressedOops && !is_native;
 985   if (is_strong) {
 986     if (is_narrow) {
 987       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
 988       name = "load_reference_barrier_strong_narrow";
 989     } else {
 990       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
 991       name = "load_reference_barrier_strong";
 992     }
 993   } else if (is_weak) {
 994     if (is_narrow) {
 995       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
 996       name = "load_reference_barrier_weak_narrow";
 997     } else {
 998       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
 999       name = "load_reference_barrier_weak";
1000     }
1001   } else {
1002     assert(is_phantom, "only remaining strength");
1003     if (is_narrow) {
1004       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1005       name = "load_reference_barrier_phantom_narrow";
1006     } else {
1007       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
1008       name = "load_reference_barrier_phantom";
1009     }
1010   }
1011   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1012 
1013   call->init_req(TypeFunc::Control, ctrl);
1014   call->init_req(TypeFunc::I_O, phase->C->top());
1015   call->init_req(TypeFunc::Memory, mm);
1016   call->init_req(TypeFunc::FramePtr, phase->C->top());
1017   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1018   call->init_req(TypeFunc::Parms, val);
1019   call->init_req(TypeFunc::Parms+1, load_addr);
1020   phase->register_control(call, loop, ctrl);
1021   ctrl = new ProjNode(call, TypeFunc::Control);
1022   phase->register_control(ctrl, loop, call);
1023   result_mem = new ProjNode(call, TypeFunc::Memory);
1024   phase->register_new_node(result_mem, call);
1025   val = new ProjNode(call, TypeFunc::Parms);
1026   phase->register_new_node(val, call);
1027   val = new CheckCastPPNode(ctrl, val, obj_type);
1028   phase->register_new_node(val, ctrl);
1029 }
1030 
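     // After barrier expansion, rewires nodes that were control dependent on the
     // barrier's original control onto the new region, while keeping the incoming
     // raw memory state above the expanded barrier.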
1031 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1032   Node* ctrl = phase->get_ctrl(barrier);
1033   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1034 
1035   // Update the control of all nodes that should be after the
1036   // barrier control flow
1037   uses.clear();
1038   // Every node that is control dependent on the barrier's input
1039   // control will be after the expanded barrier. The raw memory (if
1040   // it is control dependent on the barrier's input control)
1041   // must stay above the barrier.
1042   uses_to_ignore.clear();
1043   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1044     uses_to_ignore.push(init_raw_mem);
1045   }
1046   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1047     Node *n = uses_to_ignore.at(next);
1048     for (uint i = 0; i < n->req(); i++) {
1049       Node* in = n->in(i);
1050       if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1051         uses_to_ignore.push(in);
1052       }
1053     }
1054   }
1055   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1056     Node* u = ctrl->fast_out(i);
1057     if (u->_idx < last &&
1058         u != barrier &&
1059         !uses_to_ignore.member(u) &&
1060         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1061         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1062       Node* old_c = phase->ctrl_or_self(u);
1063       Node* c = old_c;
1064       if (c != ctrl ||
1065           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1066           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1067         phase->igvn().rehash_node_delayed(u);
1068         int nb = u->replace_edge(ctrl, region, &phase->igvn());
1069         if (u->is_CFG()) {
1070           if (phase->idom(u) == ctrl) {
1071             phase->set_idom(u, region, phase->dom_depth(region));
1072           }
1073         } else if (phase->get_ctrl(u) == ctrl) {
1074           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1075           uses.push(u);
1076         }
1077         assert(nb == 1, "more than 1 ctrl input?");
1078         --i, imax -= nb;
1079       }
1080     }
1081   }
1082 }
1083 
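     // Builds Phis that merge n (reached via the fallthrough catch projection) with
     // n_clone (reached via the catchall projection) at the regions between c and
     // ctrl, recursing for region inputs dominated by neither projection.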
1084 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections* projs, PhaseIdealLoop* phase) {
1085   Node* region = NULL;
1086   while (c != ctrl) {
1087     if (c->is_Region()) {
1088       region = c;
1089     }
1090     c = phase->idom(c);
1091   }
1092   assert(region != NULL, "");
1093   Node* phi = new PhiNode(region, n->bottom_type());
1094   for (uint j = 1; j < region->req(); j++) {
1095     Node* in = region->in(j);
1096     if (phase->is_dominator(projs->fallthrough_catchproj, in)) {
1097       phi->init_req(j, n);
1098     } else if (phase->is_dominator(projs->catchall_catchproj, in)) {
1099       phi->init_req(j, n_clone);
1100     } else {
1101       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1102     }
1103   }
1104   phase->register_new_node(phi, region);
1105   return phi;
1106 }
1107 
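     // Prepares barriers for expansion: un-strip-mines loops that contain IU barriers,
     // then adjusts the control of each load reference barrier (hoisting above rethrow
     // calls, cloning across the exception paths of Java calls) ahead of expansion.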
1108 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1109   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1110 
1111   Unique_Node_List uses;
1112   for (int i = 0; i < state->iu_barriers_count(); i++) {
1113     Node* barrier = state->iu_barrier(i);
1114     Node* ctrl = phase->get_ctrl(barrier);
1115     IdealLoopTree* loop = phase->get_loop(ctrl);
1116     Node* head = loop->head();
1117     if (head->is_OuterStripMinedLoop()) {
1118       // Expanding a barrier here will break loop strip mining
1119       // verification. Transform the loop so the loop nest doesn't
1120       // appear as strip mined.
1121       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1122       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1123     }
1124   }
1125 
1126   Node_Stack stack(0);
1127   Node_List clones;
1128   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1129     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1130 
1131     Node* ctrl = phase->get_ctrl(lrb);
1132     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1133 
1134     CallStaticJavaNode* unc = NULL;
1135     Node* unc_ctrl = NULL;
1136     Node* uncasted_val = val;
1137 
1138     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1139       Node* u = lrb->fast_out(i);
1140       if (u->Opcode() == Op_CastPP &&
1141           u->in(0) != NULL &&
1142           phase->is_dominator(u->in(0), ctrl)) {
1143         const Type* u_t = phase->igvn().type(u);
1144 
1145         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1146             u->in(0)->Opcode() == Op_IfTrue &&
1147             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1148             u->in(0)->in(0)->is_If() &&
1149             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1150             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1151             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1152             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1153             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1154           IdealLoopTree* loop = phase->get_loop(ctrl);
1155           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1156 
1157           if (!unc_loop->is_member(loop)) {
1158             continue;
1159           }
1160 
1161           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1162           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1163           if (branch == NodeSentinel) {
1164             continue;
1165           }
1166 
1167           Node* iff = u->in(0)->in(0);
1168           Node* bol = iff->in(1)->clone();
1169           Node* cmp = bol->in(1)->clone();
1170           cmp->set_req(1, lrb);
1171           bol->set_req(1, cmp);
1172           phase->igvn().replace_input_of(iff, 1, bol);
1173           phase->set_ctrl(lrb, iff->in(0));
1174           phase->register_new_node(cmp, iff->in(0));
1175           phase->register_new_node(bol, iff->in(0));
1176           break;
1177         }
1178       }
1179     }
1180     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1181       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1182       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1183         // The rethrow call may have too many projections to be
1184         // properly handled here. Given there's no reason for a
1185         // barrier to depend on the call, move it above the call
1186         stack.push(lrb, 0);
1187         do {
1188           Node* n = stack.node();
1189           uint idx = stack.index();
1190           if (idx < n->req()) {
1191             Node* in = n->in(idx);
1192             stack.set_index(idx+1);
1193             if (in != NULL) {
1194               if (phase->has_ctrl(in)) {
1195                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1196 #ifdef ASSERT
1197                   for (uint i = 0; i < stack.size(); i++) {
1198                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1199                   }
1200 #endif
1201                   stack.push(in, 0);
1202                 }
1203               } else {
1204                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1205               }
1206             }
1207           } else {
1208             phase->set_ctrl(n, call->in(0));
1209             stack.pop();
1210           }
1211         } while(stack.size() > 0);
1212         continue;
1213       }
1214       CallProjections* projs = call->extract_projections(false, false);
1215 #ifdef ASSERT
1216       VectorSet cloned;
1217 #endif
1218       Node* lrb_clone = lrb->clone();
1219       phase->register_new_node(lrb_clone, projs->catchall_catchproj);
1220       phase->set_ctrl(lrb, projs->fallthrough_catchproj);
1221 
1222       stack.push(lrb, 0);
1223       clones.push(lrb_clone);
1224 
1225       do {
1226         assert(stack.size() == clones.size(), "");
1227         Node* n = stack.node();
1228 #ifdef ASSERT
1229         if (n->is_Load()) {
1230           Node* mem = n->in(MemNode::Memory);
1231           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1232             Node* u = mem->fast_out(j);
1233             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1234           }
1235         }
1236 #endif
1237         uint idx = stack.index();
1238         Node* n_clone = clones.at(clones.size()-1);
1239         if (idx < n->outcnt()) {
1240           Node* u = n->raw_out(idx);
1241           Node* c = phase->ctrl_or_self(u);
1242           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs->fallthrough_proj)) {
1243             stack.set_index(idx+1);
1244             assert(!u->is_CFG(), "");
1245             stack.push(u, 0);
1246             assert(!cloned.test_set(u->_idx), "only one clone");
1247             Node* u_clone = u->clone();
1248             int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1249             assert(nb > 0, "should have replaced some uses");
1250             phase->register_new_node(u_clone, projs->catchall_catchproj);
1251             clones.push(u_clone);
1252             phase->set_ctrl(u, projs->fallthrough_catchproj);
1253           } else {
1254             bool replaced = false;
1255             if (u->is_Phi()) {
1256               for (uint k = 1; k < u->req(); k++) {
1257                 if (u->in(k) == n) {
1258                   if (phase->is_dominator(projs->catchall_catchproj, u->in(0)->in(k))) {
1259                     phase->igvn().replace_input_of(u, k, n_clone);
1260                     replaced = true;
1261                   } else if (!phase->is_dominator(projs->fallthrough_catchproj, u->in(0)->in(k))) {
1262                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1263                     replaced = true;
1264                   }
1265                 }
1266               }
1267             } else {
1268               if (phase->is_dominator(projs->catchall_catchproj, c)) {
1269                 phase->igvn().rehash_node_delayed(u);
1270                 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1271                 assert(nb > 0, "should have replaced some uses");
1272                 replaced = true;
1273               } else if (!phase->is_dominator(projs->fallthrough_catchproj, c)) {
1274                 if (u->is_If()) {
1275                   // Can't break If/Bool/Cmp chain
1276                   assert(n->is_Bool(), "unexpected If shape");
1277                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1278                   assert(n_clone->is_Bool(), "unexpected clone");
1279                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1280                   Node* bol_clone = n->clone();
1281                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1282                   bol_clone->set_req(1, cmp_clone);
1283 
1284                   Node* nn = stack.node_at(stack.size()-3);
1285                   Node* nn_clone = clones.at(clones.size()-3);
1286                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1287 
1288                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1289                                                    &phase->igvn());
1290                   assert(nb > 0, "should have replaced some uses");
1291 
1292                   phase->register_new_node(bol_clone, u->in(0));
1293                   phase->register_new_node(cmp_clone, u->in(0));
1294 
1295                   phase->igvn().replace_input_of(u, 1, bol_clone);
1296 
1297                 } else {
1298                   phase->igvn().rehash_node_delayed(u);
1299                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1300                   assert(nb > 0, "should have replaced some uses");
1301                 }
1302                 replaced = true;
1303               }
1304             }
1305             if (!replaced) {
1306               stack.set_index(idx+1);
1307             }
1308           }
1309         } else {
1310           stack.pop();
1311           clones.pop();
1312         }
1313       } while (stack.size() > 0);
1314       assert(stack.size() == 0 && clones.size() == 0, "");
1315     }
1316   }
1317 
1318   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1319     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1320     Node* ctrl = phase->get_ctrl(lrb);
1321     IdealLoopTree* loop = phase->get_loop(ctrl);
1322     Node* head = loop->head();
1323     if (head->is_OuterStripMinedLoop()) {
1324       // Expanding a barrier here will break loop strip mining
1325       // verification. Transform the loop so the loop nest doesn't
1326       // appear as strip mined.
1327       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1328       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1329     }
1330   }
1331 
1332   // Expand load-reference-barriers
1333   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1334   Unique_Node_List uses_to_ignore;
1335   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1336     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1337     uint last = phase->C->unique();
1338     Node* ctrl = phase->get_ctrl(lrb);
1339     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1340 
1341 
1342     Node* orig_ctrl = ctrl;
1343 
1344     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1345     Node* init_raw_mem = raw_mem;
1346     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1347 
1348     IdealLoopTree *loop = phase->get_loop(ctrl);
1349 
1350     Node* heap_stable_ctrl = NULL;
1351     Node* null_ctrl = NULL;
1352 
1353     assert(val->bottom_type()->make_oopptr(), "need oop");
1354     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1355 
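    // Expansion builds a three-way merge: _heap_stable bypasses the barrier entirely,
    // _not_cset skips the runtime call for objects outside the collection set, and
    // _evac_path calls the LRB stub. val_phi merges the resulting oop, raw_mem_phi the
    // raw memory state.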
1356     enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1357     Node* region = new RegionNode(PATH_LIMIT);
1358     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1359     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1360 
1361     // Stable path.
1362     int flags = ShenandoahHeap::HAS_FORWARDED;
1363     if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1364       flags |= ShenandoahHeap::WEAK_ROOTS;
1365     }
1366     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1367     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1368 
1369     // Heap stable case
1370     region->init_req(_heap_stable, heap_stable_ctrl);
1371     val_phi->init_req(_heap_stable, val);
1372     raw_mem_phi->init_req(_heap_stable, raw_mem);
1373 
1374     // Test for in-cset, unless it's a native-LRB. Native LRBs need to return NULL
1375     // even for non-cset objects to prevent resurrection of such objects.
1376     // Wires !in_cset(obj) to the _not_cset slot of the region and phis.
1377     Node* not_cset_ctrl = NULL;
1378     if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1379       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1380     }
1381     if (not_cset_ctrl != NULL) {
1382       region->init_req(_not_cset, not_cset_ctrl);
1383       val_phi->init_req(_not_cset, val);
1384       raw_mem_phi->init_req(_not_cset, raw_mem);
1385     } else {
1386       region->del_req(_not_cset);
1387       val_phi->del_req(_not_cset);
1388       raw_mem_phi->del_req(_not_cset);
1389     }
1390 
1391     // Resolve object when orig-value is in cset.
1392     // Make the unconditional resolve for fwdptr.
1393 
1394     // Call the LRB stub and wire up that path in the _evac_path slot.
1395     Node* result_mem = NULL;
1396 
1397     Node* addr;
1398     if (ShenandoahSelfFixing) {
1399       VectorSet visited;
1400       addr = get_load_addr(phase, visited, lrb);
1401     } else {
1402       addr = phase->igvn().zerocon(T_OBJECT);
1403     }
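    // If the load address is an AddP, rebuild it on a CheckCastPP of its base that is
    // strongly control dependent on the barrier's control, so the rebuilt address
    // cannot float above this point.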
1404     if (addr->Opcode() == Op_AddP) {
1405       Node* orig_base = addr->in(AddPNode::Base);
1406       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1407       phase->register_new_node(base, ctrl);
1408       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1409         // Field access
1410         addr = addr->clone();
1411         addr->set_req(AddPNode::Base, base);
1412         addr->set_req(AddPNode::Address, base);
1413         phase->register_new_node(addr, ctrl);
1414       } else {
1415         Node* addr2 = addr->in(AddPNode::Address);
1416         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1417               addr2->in(AddPNode::Base) == orig_base) {
1418           addr2 = addr2->clone();
1419           addr2->set_req(AddPNode::Base, base);
1420           addr2->set_req(AddPNode::Address, base);
1421           phase->register_new_node(addr2, ctrl);
1422           addr = addr->clone();
1423           addr->set_req(AddPNode::Base, base);
1424           addr->set_req(AddPNode::Address, addr2);
1425           phase->register_new_node(addr, ctrl);
1426         }
1427       }
1428     }
1429     call_lrb_stub(ctrl, val, addr, result_mem, raw_mem, lrb->decorators(), phase);
1430     region->init_req(_evac_path, ctrl);
1431     val_phi->init_req(_evac_path, val);
1432     raw_mem_phi->init_req(_evac_path, result_mem);
1433 
1434     phase->register_control(region, loop, heap_stable_iff);
1435     Node* out_val = val_phi;
1436     phase->register_new_node(val_phi, region);
1437     phase->register_new_node(raw_mem_phi, region);
1438 
1439     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1440 
1441     ctrl = orig_ctrl;
1442 
1443     phase->igvn().replace_node(lrb, out_val);
1444 
1445     follow_barrier_uses(out_val, ctrl, uses, phase);
1446 
1447     for(uint next = 0; next < uses.size(); next++ ) {
1448       Node *n = uses.at(next);
1449       assert(phase->get_ctrl(n) == ctrl, "bad control");
1450       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1451       phase->set_ctrl(n, region);
1452       follow_barrier_uses(n, ctrl, uses, phase);
1453     }
1454 
1455     // The slow-path call produces memory: hook the raw memory phi
1456     // of the expanded load reference barrier into the rest of the graph,
1457     // which may require adding memory phis at every post-dominated
1458     // region and at enclosing loop heads. Use the memory state
1459     // collected in memory_nodes to fix the memory graph, updating that
1460     // state as we go.
1461     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1462   }
1463   // Done expanding load-reference-barriers.
1464   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1465 
1466   for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
1467     Node* barrier = state->iu_barrier(i);
1468     Node* pre_val = barrier->in(1);
1469 
1470     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1471       ShouldNotReachHere();
1472       continue;
1473     }
1474 
1475     Node* ctrl = phase->get_ctrl(barrier);
1476 
1477     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1478       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1479       ctrl = ctrl->in(0)->in(0);
1480       phase->set_ctrl(barrier, ctrl);
1481     } else if (ctrl->is_CallRuntime()) {
1482       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1483       ctrl = ctrl->in(0);
1484       phase->set_ctrl(barrier, ctrl);
1485     }
1486 
1487     Node* init_ctrl = ctrl;
1488     IdealLoopTree* loop = phase->get_loop(ctrl);
1489     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1490     Node* init_raw_mem = raw_mem;
1491     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1492     Node* heap_stable_ctrl = NULL;
1493     Node* null_ctrl = NULL;
1494     uint last = phase->C->unique();
1495 
1496     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1497     Node* region = new RegionNode(PATH_LIMIT);
1498     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1499 
1500     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1501     Node* region2 = new RegionNode(PATH_LIMIT2);
1502     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1503 
1504     // Stable path.
1505     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1506     region->init_req(_heap_stable, heap_stable_ctrl);
1507     phi->init_req(_heap_stable, raw_mem);
1508 
1509     // Null path
1510     Node* reg2_ctrl = NULL;
1511     test_null(ctrl, pre_val, null_ctrl, phase);
1512     if (null_ctrl != NULL) {
1513       reg2_ctrl = null_ctrl->in(0);
1514       region2->init_req(_null_path, null_ctrl);
1515       phi2->init_req(_null_path, raw_mem);
1516     } else {
1517       region2->del_req(_null_path);
1518       phi2->del_req(_null_path);
1519     }
1520 
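    // SATB fast path: load the thread-local queue index; if it is non-zero the queue
    // has room, so store pre_val at buffer + index - wordSize and write back the
    // decremented index. A zero index falls through to the runtime call below.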
1521     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1522     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1523     Node* thread = new ThreadLocalNode();
1524     phase->register_new_node(thread, ctrl);
1525     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1526     phase->register_new_node(buffer_adr, ctrl);
1527     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1528     phase->register_new_node(index_adr, ctrl);
1529 
1530     BasicType index_bt = TypeX_X->basic_type();
1531     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1532     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1533     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1534     phase->register_new_node(index, ctrl);
1535     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1536     phase->register_new_node(index_cmp, ctrl);
1537     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1538     phase->register_new_node(index_test, ctrl);
1539     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1540     if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1541     phase->register_control(queue_full_iff, loop, ctrl);
1542     Node* not_full = new IfTrueNode(queue_full_iff);
1543     phase->register_control(not_full, loop, queue_full_iff);
1544     Node* full = new IfFalseNode(queue_full_iff);
1545     phase->register_control(full, loop, queue_full_iff);
1546 
1547     ctrl = not_full;
1548 
1549     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1550     phase->register_new_node(next_index, ctrl);
1551 
1552     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1553     phase->register_new_node(buffer, ctrl);
1554     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1555     phase->register_new_node(log_addr, ctrl);
1556     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1557     phase->register_new_node(log_store, ctrl);
1558     // Update the SATB queue index.
1559     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1560     phase->register_new_node(index_update, ctrl);
1561 
1562     // Fast-path case
1563     region2->init_req(_fast_path, ctrl);
1564     phi2->init_req(_fast_path, index_update);
1565 
1566     ctrl = full;
1567 
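    // Slow path (queue full): wrap the raw memory state in a MergeMem and call the
    // write_ref_field_pre_entry runtime stub with (pre_val, thread).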
1568     Node* base = find_bottom_mem(ctrl, phase);
1569 
1570     MergeMemNode* mm = MergeMemNode::make(base);
1571     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1572     phase->register_new_node(mm, ctrl);
1573 
1574     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1575     call->init_req(TypeFunc::Control, ctrl);
1576     call->init_req(TypeFunc::I_O, phase->C->top());
1577     call->init_req(TypeFunc::Memory, mm);
1578     call->init_req(TypeFunc::FramePtr, phase->C->top());
1579     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1580     call->init_req(TypeFunc::Parms, pre_val);
1581     call->init_req(TypeFunc::Parms+1, thread);
1582     phase->register_control(call, loop, ctrl);
1583 
1584     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1585     phase->register_control(ctrl_proj, loop, call);
1586     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1587     phase->register_new_node(mem_proj, call);
1588 
1589     // Slow-path case
1590     region2->init_req(_slow_path, ctrl_proj);
1591     phi2->init_req(_slow_path, mem_proj);
1592 
1593     phase->register_control(region2, loop, reg2_ctrl);
1594     phase->register_new_node(phi2, region2);
1595 
1596     region->init_req(_heap_unstable, region2);
1597     phi->init_req(_heap_unstable, phi2);
1598 
1599     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1600     phase->register_new_node(phi, region);
1601 
1602     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1603     for(uint next = 0; next < uses.size(); next++ ) {
1604       Node *n = uses.at(next);
1605       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1606       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1607       phase->set_ctrl(n, region);
1608       follow_barrier_uses(n, init_ctrl, uses, phase);
1609     }
1610     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1611 
1612     phase->igvn().replace_node(barrier, pre_val);
1613   }
1614   assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1615 
1616 }
1617 
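// Walk back from the barrier's value input to the address of the load that produced
// it. Returns NULL for nodes already visited on this walk, and a zero constant when
// no single load address can be determined.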
1618 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1619   if (visited.test_set(in->_idx)) {
1620     return NULL;
1621   }
1622   switch (in->Opcode()) {
1623     case Op_Proj:
1624       return get_load_addr(phase, visited, in->in(0));
1625     case Op_CastPP:
1626     case Op_CheckCastPP:
1627     case Op_DecodeN:
1628     case Op_EncodeP:
1629       return get_load_addr(phase, visited, in->in(1));
1630     case Op_LoadN:
1631     case Op_LoadP:
1632       return in->in(MemNode::Address);
1633     case Op_CompareAndExchangeN:
1634     case Op_CompareAndExchangeP:
1635     case Op_GetAndSetN:
1636     case Op_GetAndSetP:
1637     case Op_ShenandoahCompareAndExchangeP:
1638     case Op_ShenandoahCompareAndExchangeN:
1639       // Those instructions would just have stored a different
1640       // value into the field. There is no point in trying to fix it here.
1641       return phase->igvn().zerocon(T_OBJECT);
1642     case Op_CMoveP:
1643     case Op_CMoveN: {
1644       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1645       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1646       // Handle unambiguous cases: only one branch reports an address, or both report the same one.
1647       if (t != NULL && f == NULL) return t;
1648       if (t == NULL && f != NULL) return f;
1649       if (t != NULL && t == f)    return t;
1650       // Ambiguity.
1651       return phase->igvn().zerocon(T_OBJECT);
1652     }
1653     case Op_Phi: {
1654       Node* addr = NULL;
1655       for (uint i = 1; i < in->req(); i++) {
1656         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1657         if (addr == NULL) {
1658           addr = addr1;
1659         }
1660         if (addr != addr1) {
1661           return phase->igvn().zerocon(T_OBJECT);
1662         }
1663       }
1664       return addr;
1665     }
1666     case Op_ShenandoahLoadReferenceBarrier:
1667       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1668     case Op_ShenandoahIUBarrier:
1669       return get_load_addr(phase, visited, in->in(1));
1670     case Op_CallDynamicJava:
1671     case Op_CallLeaf:
1672     case Op_CallStaticJava:
1673     case Op_ConN:
1674     case Op_ConP:
1675     case Op_Parm:
1676     case Op_CreateEx:
1677       return phase->igvn().zerocon(T_OBJECT);
1678     default:
1679 #ifdef ASSERT
1680       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1681 #endif
1682       return phase->igvn().zerocon(T_OBJECT);
1683   }
1684 
1685 }
1686 
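// Clone the gc-state load and its And/Cmp/Bool chain above the loop entry, using a
// memory state that dominates the loop, so the heap-stable test becomes loop
// invariant and can be used as an unswitching candidate.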
1687 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1688   IdealLoopTree *loop = phase->get_loop(iff);
1689   Node* loop_head = loop->_head;
1690   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1691 
1692   Node* bol = iff->in(1);
1693   Node* cmp = bol->in(1);
1694   Node* andi = cmp->in(1);
1695   Node* load = andi->in(1);
1696 
1697   assert(is_gc_state_load(load), "broken");
1698   if (!phase->is_dominator(load->in(0), entry_c)) {
1699     Node* mem_ctrl = NULL;
1700     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1701     load = load->clone();
1702     load->set_req(MemNode::Memory, mem);
1703     load->set_req(0, entry_c);
1704     phase->register_new_node(load, entry_c);
1705     andi = andi->clone();
1706     andi->set_req(1, load);
1707     phase->register_new_node(andi, entry_c);
1708     cmp = cmp->clone();
1709     cmp->set_req(1, andi);
1710     phase->register_new_node(cmp, entry_c);
1711     bol = bol->clone();
1712     bol->set_req(1, cmp);
1713     phase->register_new_node(bol, entry_c);
1714 
1715     phase->igvn().replace_input_of(iff, 1, bol);
1716   }
1717 }
1718 
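// Returns true when n is a heap-stable test sitting on a region whose immediate
// dominator is another heap-stable test and every region input is dominated by one of
// that test's projections, i.e. the two tests are back to back.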
1719 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1720   if (!n->is_If() || n->is_CountedLoopEnd()) {
1721     return false;
1722   }
1723   Node* region = n->in(0);
1724 
1725   if (!region->is_Region()) {
1726     return false;
1727   }
1728   Node* dom = phase->idom(region);
1729   if (!dom->is_If()) {
1730     return false;
1731   }
1732 
1733   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1734     return false;
1735   }
1736 
1737   IfNode* dom_if = dom->as_If();
1738   Node* proj_true = dom_if->proj_out(1);
1739   Node* proj_false = dom_if->proj_out(0);
1740 
1741   for (uint i = 1; i < region->req(); i++) {
1742     if (phase->is_dominator(proj_true, region->in(i))) {
1743       continue;
1744     }
1745     if (phase->is_dominator(proj_false, region->in(i))) {
1746       continue;
1747     }
1748     return false;
1749   }
1750 
1751   return true;
1752 }
1753 
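// Merge a heap-stable test with the identical dominating test: share the gc-state
// load, feed the dominated If with a phi of constants keyed on which projection of
// the dominating If reaches each region input, then split the If so the redundant
// test folds away.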
1754 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1755   assert(is_heap_stable_test(n), "no other tests");
1756   if (identical_backtoback_ifs(n, phase)) {
1757     Node* n_ctrl = n->in(0);
1758     if (phase->can_split_if(n_ctrl)) {
1759       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1760       if (is_heap_stable_test(n)) {
1761         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1762         assert(is_gc_state_load(gc_state_load), "broken");
1763         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1764         assert(is_gc_state_load(dom_gc_state_load), "broken");
1765         if (gc_state_load != dom_gc_state_load) {
1766           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1767         }
1768       }
1769       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1770       Node* proj_true = dom_if->proj_out(1);
1771       Node* proj_false = dom_if->proj_out(0);
1772       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1773       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1774 
1775       for (uint i = 1; i < n_ctrl->req(); i++) {
1776         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1777           bolphi->init_req(i, con_true);
1778         } else {
1779           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1780           bolphi->init_req(i, con_false);
1781         }
1782       }
1783       phase->register_new_node(bolphi, n_ctrl);
1784       phase->igvn().replace_input_of(n, 1, bolphi);
1785       phase->do_split_if(n);
1786     }
1787   }
1788 }
1789 
1790 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1791   // Find first invariant test that doesn't exit the loop
1792   LoopNode *head = loop->_head->as_Loop();
1793   IfNode* unswitch_iff = NULL;
1794   Node* n = head->in(LoopNode::LoopBackControl);
1795   int loop_has_sfpts = -1;
1796   while (n != head) {
1797     Node* n_dom = phase->idom(n);
1798     if (n->is_Region()) {
1799       if (n_dom->is_If()) {
1800         IfNode* iff = n_dom->as_If();
1801         if (iff->in(1)->is_Bool()) {
1802           BoolNode* bol = iff->in(1)->as_Bool();
1803           if (bol->in(1)->is_Cmp()) {
1804             // If the condition is invariant and not a loop exit,
1805             // we have found a candidate to unswitch on.
1806             if (is_heap_stable_test(iff) &&
1807                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1808               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1809               if (loop_has_sfpts == -1) {
1810                 for(uint i = 0; i < loop->_body.size(); i++) {
1811                   Node *m = loop->_body[i];
1812                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1813                     loop_has_sfpts = 1;
1814                     break;
1815                   }
1816                 }
1817                 if (loop_has_sfpts == -1) {
1818                   loop_has_sfpts = 0;
1819                 }
1820               }
1821               if (!loop_has_sfpts) {
1822                 unswitch_iff = iff;
1823               }
1824             }
1825           }
1826         }
1827       }
1828     }
1829     n = n_dom;
1830   }
1831   return unswitch_iff;
1832 }
1833 
1834 
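// Post-expansion cleanup: collect every heap-stable test in the graph, merge
// back-to-back copies, then try to unswitch innermost loops that contain such a test
// and no non-leaf safepoint, hoisting the gc-state test out of the loop first.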
1835 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1836   Node_List heap_stable_tests;
1837   stack.push(phase->C->start(), 0);
1838   do {
1839     Node* n = stack.node();
1840     uint i = stack.index();
1841 
1842     if (i < n->outcnt()) {
1843       Node* u = n->raw_out(i);
1844       stack.set_index(i+1);
1845       if (!visited.test_set(u->_idx)) {
1846         stack.push(u, 0);
1847       }
1848     } else {
1849       stack.pop();
1850       if (n->is_If() && is_heap_stable_test(n)) {
1851         heap_stable_tests.push(n);
1852       }
1853     }
1854   } while (stack.size() > 0);
1855 
1856   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1857     Node* n = heap_stable_tests.at(i);
1858     assert(is_heap_stable_test(n), "only evacuation test");
1859     merge_back_to_back_tests(n, phase);
1860   }
1861 
1862   if (!phase->C->major_progress()) {
1863     VectorSet seen;
1864     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1865       Node* n = heap_stable_tests.at(i);
1866       IdealLoopTree* loop = phase->get_loop(n);
1867       if (loop != phase->ltree_root() &&
1868           loop->_child == NULL &&
1869           !loop->_irreducible) {
1870         Node* head = loop->_head;
1871         if (head->is_Loop() &&
1872             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1873             !seen.test_set(head->_idx)) {
1874           IfNode* iff = find_unswitching_candidate(loop, phase);
1875           if (iff != NULL) {
1876             Node* bol = iff->in(1);
1877             if (head->as_Loop()->is_strip_mined()) {
1878               head->as_Loop()->verify_strip_mined(0);
1879             }
1880             move_gc_state_test_out_of_loop(iff, phase);
1881 
1882             AutoNodeBudget node_budget(phase);
1883 
1884             if (loop->policy_unswitching(phase)) {
1885               if (head->as_Loop()->is_strip_mined()) {
1886                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1887                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1888               }
1889               phase->do_unswitching(loop, old_new);
1890             } else {
1891               // Not proceeding with unswitching. Move the load back
1892               // into the loop.
1893               phase->igvn().replace_input_of(iff, 1, bol);
1894             }
1895           }
1896         }
1897       }
1898     }
1899   }
1900 }
1901 
1902 #ifdef ASSERT
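// Debug-only sanity check of the raw memory graph around expanded LRB runtime calls:
// wherever a control path that carries the call's raw memory state merges with one
// that does not, a raw memory Phi must merge the memory states accordingly.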
1903 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
1904   const bool trace = false;
1905   ResourceMark rm;
1906   Unique_Node_List nodes;
1907   Unique_Node_List controls;
1908   Unique_Node_List memories;
1909 
1910   nodes.push(root);
1911   for (uint next = 0; next < nodes.size(); next++) {
1912     Node *n  = nodes.at(next);
1913     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
1914       controls.push(n);
1915       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
1916       for (uint next2 = 0; next2 < controls.size(); next2++) {
1917         Node *m = controls.at(next2);
1918         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1919           Node* u = m->fast_out(i);
1920           if (u->is_CFG() && !u->is_Root() &&
1921               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
1922               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
1923             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
1924             controls.push(u);
1925           }
1926         }
1927       }
1928       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
1929       for (uint next2 = 0; next2 < memories.size(); next2++) {
1930         Node *m = memories.at(next2);
1931         assert(m->bottom_type() == Type::MEMORY, "");
1932         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1933           Node* u = m->fast_out(i);
1934           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
1935             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1936             memories.push(u);
1937           } else if (u->is_LoadStore()) {
1938             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
1939             memories.push(u->find_out_with(Op_SCMemProj));
1940           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
1941             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1942             memories.push(u);
1943           } else if (u->is_Phi()) {
1944             assert(u->bottom_type() == Type::MEMORY, "");
1945             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
1946               assert(controls.member(u->in(0)), "");
1947               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1948               memories.push(u);
1949             }
1950           } else if (u->is_SafePoint() || u->is_MemBar()) {
1951             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
1952               Node* uu = u->fast_out(j);
1953               if (uu->bottom_type() == Type::MEMORY) {
1954                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
1955                 memories.push(uu);
1956               }
1957             }
1958           }
1959         }
1960       }
1961       for (uint next2 = 0; next2 < controls.size(); next2++) {
1962         Node *m = controls.at(next2);
1963         if (m->is_Region()) {
1964           bool all_in = true;
1965           for (uint i = 1; i < m->req(); i++) {
1966             if (!controls.member(m->in(i))) {
1967               all_in = false;
1968               break;
1969             }
1970           }
1971           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
1972           bool found_phi = false;
1973           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
1974             Node* u = m->fast_out(j);
1975             if (u->is_Phi() && memories.member(u)) {
1976               found_phi = true;
1977               for (uint i = 1; i < u->req() && found_phi; i++) {
1978                 Node* k = u->in(i);
1979                 if (memories.member(k) != controls.member(m->in(i))) {
1980                   found_phi = false;
1981                 }
1982               }
1983             }
1984           }
1985           assert(found_phi || all_in, "");
1986         }
1987       }
1988       controls.clear();
1989       memories.clear();
1990     }
1991     for( uint i = 0; i < n->len(); ++i ) {
1992       Node *m = n->in(i);
1993       if (m != NULL) {
1994         nodes.push(m);
1995       }
1996     }
1997   }
1998 }
1999 #endif
2000 
2001 ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(NULL, val) {
2002   ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
2003 }
2004 
2005 const Type* ShenandoahIUBarrierNode::bottom_type() const {
2006   if (in(1) == NULL || in(1)->is_top()) {
2007     return Type::TOP;
2008   }
2009   const Type* t = in(1)->bottom_type();
2010   if (t == TypePtr::NULL_PTR) {
2011     return t;
2012   }
2013   return t->is_oopptr();
2014 }
2015 
2016 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
2017   if (in(1) == NULL) {
2018     return Type::TOP;
2019   }
2020   const Type* t = phase->type(in(1));
2021   if (t == Type::TOP) {
2022     return Type::TOP;
2023   }
2024   if (t == TypePtr::NULL_PTR) {
2025     return t;
2026   }
2027   return t->is_oopptr();
2028 }
2029 
2030 int ShenandoahIUBarrierNode::needed(Node* n) {
2031   if (n == NULL ||
2032       n->is_Allocate() ||
2033       n->Opcode() == Op_ShenandoahIUBarrier ||
2034       n->bottom_type() == TypePtr::NULL_PTR ||
2035       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2036     return NotNeeded;
2037   }
2038   if (n->is_Phi() ||
2039       n->is_CMove()) {
2040     return MaybeNeeded;
2041   }
2042   return Needed;
2043 }
2044 
2045 Node* ShenandoahIUBarrierNode::next(Node* n) {
2046   for (;;) {
2047     if (n == NULL) {
2048       return n;
2049     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2050       return n;
2051     } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2052       return n;
2053     } else if (n->is_ConstraintCast() ||
2054                n->Opcode() == Op_DecodeN ||
2055                n->Opcode() == Op_EncodeP) {
2056       n = n->in(1);
2057     } else if (n->is_Proj()) {
2058       n = n->in(0);
2059     } else {
2060       return n;
2061     }
2062   }
2063   ShouldNotReachHere();
2064   return NULL;
2065 }
2066 
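// The IU barrier is redundant if every value reaching its input (looking through
// casts, projections, phis and CMoves) needs no barrier: NULL, a constant oop, a
// fresh allocation or another IU barrier. In that case return the input so igvn
// removes this node.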
2067 Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
2068   PhaseIterGVN* igvn = phase->is_IterGVN();
2069 
2070   Node* n = next(in(1));
2071 
2072   int cont = needed(n);
2073 
2074   if (cont == NotNeeded) {
2075     return in(1);
2076   } else if (cont == MaybeNeeded) {
2077     if (igvn == NULL) {
2078       phase->record_for_igvn(this);
2079       return this;
2080     } else {
2081       ResourceMark rm;
2082       Unique_Node_List wq;
2083       uint wq_i = 0;
2084 
2085       for (;;) {
2086         if (n->is_Phi()) {
2087           for (uint i = 1; i < n->req(); i++) {
2088             Node* m = n->in(i);
2089             if (m != NULL) {
2090               wq.push(m);
2091             }
2092           }
2093         } else {
2094           assert(n->is_CMove(), "nothing else here");
2095           Node* m = n->in(CMoveNode::IfFalse);
2096           wq.push(m);
2097           m = n->in(CMoveNode::IfTrue);
2098           wq.push(m);
2099         }
2100         Node* orig_n = NULL;
2101         do {
2102           if (wq_i >= wq.size()) {
2103             return in(1);
2104           }
2105           n = wq.at(wq_i);
2106           wq_i++;
2107           orig_n = n;
2108           n = next(n);
2109           cont = needed(n);
2110           if (cont == Needed) {
2111             return this;
2112           }
2113         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2114       }
2115     }
2116   }
2117 
2118   return this;
2119 }
2120 
2121 #ifdef ASSERT
2122 static bool has_never_branch(Node* root) {
2123   for (uint i = 1; i < root->req(); i++) {
2124     Node* in = root->in(i);
2125     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2126       return true;
2127     }
2128   }
2129   return false;
2130 }
2131 #endif
2132 
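// Build _memory_nodes, a map from CFG node to the memory state (for _alias) live at
// that node: seed it by walking the memory graph from Root, then iterate the CFG in
// reverse post order, propagating states into regions and creating memory phis where
// predecessors disagree, until a fixed point is reached.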
2133 void MemoryGraphFixer::collect_memory_nodes() {
2134   Node_Stack stack(0);
2135   VectorSet visited;
2136   Node_List regions;
2137 
2138   // Walk the raw memory graph and create a mapping from CFG node to
2139   // memory node. Exclude phis for now.
2140   stack.push(_phase->C->root(), 1);
2141   do {
2142     Node* n = stack.node();
2143     int opc = n->Opcode();
2144     uint i = stack.index();
2145     if (i < n->req()) {
2146       Node* mem = NULL;
2147       if (opc == Op_Root) {
2148         Node* in = n->in(i);
2149         int in_opc = in->Opcode();
2150         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2151           mem = in->in(TypeFunc::Memory);
2152         } else if (in_opc == Op_Halt) {
2153           if (in->in(0)->is_Region()) {
2154             Node* r = in->in(0);
2155             for (uint j = 1; j < r->req(); j++) {
2156               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2157             }
2158           } else {
2159             Node* proj = in->in(0);
2160             assert(proj->is_Proj(), "");
2161             Node* in = proj->in(0);
2162             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2163             if (in->is_CallStaticJava()) {
2164               mem = in->in(TypeFunc::Memory);
2165             } else if (in->Opcode() == Op_Catch) {
2166               Node* call = in->in(0)->in(0);
2167               assert(call->is_Call(), "");
2168               mem = call->in(TypeFunc::Memory);
2169             } else if (in->Opcode() == Op_NeverBranch) {
2170               mem = collect_memory_for_infinite_loop(in);
2171             }
2172           }
2173         } else {
2174 #ifdef ASSERT
2175           n->dump();
2176           in->dump();
2177 #endif
2178           ShouldNotReachHere();
2179         }
2180       } else {
2181         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2182         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2183         mem = n->in(i);
2184       }
2185       i++;
2186       stack.set_index(i);
2187       if (mem == NULL) {
2188         continue;
2189       }
2190       for (;;) {
2191         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2192           break;
2193         }
2194         if (mem->is_Phi()) {
2195           stack.push(mem, 2);
2196           mem = mem->in(1);
2197         } else if (mem->is_Proj()) {
2198           stack.push(mem, mem->req());
2199           mem = mem->in(0);
2200         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2201           mem = mem->in(TypeFunc::Memory);
2202         } else if (mem->is_MergeMem()) {
2203           MergeMemNode* mm = mem->as_MergeMem();
2204           mem = mm->memory_at(_alias);
2205         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2206           assert(_alias == Compile::AliasIdxRaw, "");
2207           stack.push(mem, mem->req());
2208           mem = mem->in(MemNode::Memory);
2209         } else {
2210 #ifdef ASSERT
2211           mem->dump();
2212 #endif
2213           ShouldNotReachHere();
2214         }
2215       }
2216     } else {
2217       if (n->is_Phi()) {
2218         // Nothing
2219       } else if (!n->is_Root()) {
2220         Node* c = get_ctrl(n);
2221         _memory_nodes.map(c->_idx, n);
2222       }
2223       stack.pop();
2224     }
2225   } while(stack.is_nonempty());
2226 
2227   // Iterate over CFG nodes in rpo and propagate memory state to
2228   // compute memory state at regions, creating new phis if needed.
2229   Node_List rpo_list;
2230   visited.clear();
2231   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2232   Node* root = rpo_list.pop();
2233   assert(root == _phase->C->root(), "");
2234 
2235   const bool trace = false;
2236 #ifdef ASSERT
2237   if (trace) {
2238     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2239       Node* c = rpo_list.at(i);
2240       if (_memory_nodes[c->_idx] != NULL) {
2241         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2242       }
2243     }
2244   }
2245 #endif
2246   uint last = _phase->C->unique();
2247 
2248 #ifdef ASSERT
2249   uint16_t max_depth = 0;
2250   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2251     IdealLoopTree* lpt = iter.current();
2252     max_depth = MAX2(max_depth, lpt->_nest);
2253   }
2254 #endif
2255 
2256   bool progress = true;
2257   int iteration = 0;
2258   Node_List dead_phis;
2259   while (progress) {
2260     progress = false;
2261     iteration++;
2262     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2263     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2264 
2265     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2266       Node* c = rpo_list.at(i);
2267 
2268       Node* prev_mem = _memory_nodes[c->_idx];
2269       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2270         Node* prev_region = regions[c->_idx];
2271         Node* unique = NULL;
2272         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2273           Node* m = _memory_nodes[c->in(j)->_idx];
2274           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2275           if (m != NULL) {
2276             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2277               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2278               // continue
2279             } else if (unique == NULL) {
2280               unique = m;
2281             } else if (m == unique) {
2282               // continue
2283             } else {
2284               unique = NodeSentinel;
2285             }
2286           }
2287         }
2288         assert(unique != NULL, "empty phi???");
2289         if (unique != NodeSentinel) {
2290           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2291             dead_phis.push(prev_region);
2292           }
2293           regions.map(c->_idx, unique);
2294         } else {
2295           Node* phi = NULL;
2296           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2297             phi = prev_region;
2298             for (uint k = 1; k < c->req(); k++) {
2299               Node* m = _memory_nodes[c->in(k)->_idx];
2300               assert(m != NULL, "expect memory state");
2301               phi->set_req(k, m);
2302             }
2303           } else {
2304             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2305               Node* u = c->fast_out(j);
2306               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2307                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2308                 phi = u;
2309                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2310                   Node* m = _memory_nodes[c->in(k)->_idx];
2311                   assert(m != NULL, "expect memory state");
2312                   if (u->in(k) != m) {
2313                     phi = NodeSentinel;
2314                   }
2315                 }
2316               }
2317             }
2318             if (phi == NodeSentinel) {
2319               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2320               for (uint k = 1; k < c->req(); k++) {
2321                 Node* m = _memory_nodes[c->in(k)->_idx];
2322                 assert(m != NULL, "expect memory state");
2323                 phi->init_req(k, m);
2324               }
2325             }
2326           }
2327           if (phi != NULL) {
2328             regions.map(c->_idx, phi);
2329           } else {
2330             assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2331           }
2332         }
2333         Node* current_region = regions[c->_idx];
2334         if (current_region != prev_region) {
2335           progress = true;
2336           if (prev_region == prev_mem) {
2337             _memory_nodes.map(c->_idx, current_region);
2338           }
2339         }
2340       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2341         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2342         assert(m != NULL || c->Opcode() == Op_Halt, "expect memory state");
2343         if (m != prev_mem) {
2344           _memory_nodes.map(c->_idx, m);
2345           progress = true;
2346         }
2347       }
2348 #ifdef ASSERT
2349       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2350 #endif
2351     }
2352   }
2353 
2354   // Replace existing phi with computed memory state for that region
2355   // if different (could be a new phi or a dominating memory node if
2356   // that phi was found to be useless).
2357   while (dead_phis.size() > 0) {
2358     Node* n = dead_phis.pop();
2359     n->replace_by(_phase->C->top());
2360     n->destruct(&_phase->igvn());
2361   }
2362   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2363     Node* c = rpo_list.at(i);
2364     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2365       Node* n = regions[c->_idx];
2366       assert(n != NULL || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2367       if (n != NULL && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2368         _phase->register_new_node(n, c);
2369       }
2370     }
2371   }
2372   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2373     Node* c = rpo_list.at(i);
2374     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2375       Node* n = regions[c->_idx];
2376       assert(n != NULL || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2377       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2378         Node* u = c->fast_out(i);
2379         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2380             u != n) {
2381           assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
2382           if (u->adr_type() == TypePtr::BOTTOM) {
2383             fix_memory_uses(u, n, n, c);
2384           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2385             _phase->lazy_replace(u, n);
2386             --i; --imax;
2387           }
2388         }
2389       }
2390     }
2391   }
2392 }
2393 
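// An infinite loop is only reachable from Root through a NeverBranch: recover its
// memory state either from a memory phi on the loop head or, failing that, from the
// memory input of a safepoint inside the loop.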
2394 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
2395   Node* mem = NULL;
2396   Node* head = in->in(0);
2397   assert(head->is_Region(), "unexpected infinite loop graph shape");
2398 
2399   Node* phi_mem = NULL;
2400   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2401     Node* u = head->fast_out(j);
2402     if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2403       if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2404         assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2405         phi_mem = u;
2406       } else if (u->adr_type() == TypePtr::BOTTOM) {
2407         assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2408         if (phi_mem == NULL) {
2409           phi_mem = u;
2410         }
2411       }
2412     }
2413   }
2414   if (phi_mem == NULL) {
2415     ResourceMark rm;
2416     Node_Stack stack(0);
2417     stack.push(head, 1);
2418     do {
2419       Node* n = stack.node();
2420       uint i = stack.index();
2421       if (i >= n->req()) {
2422         stack.pop();
2423       } else {
2424         stack.set_index(i + 1);
2425         Node* c = n->in(i);
2426         assert(c != head, "should have found a safepoint on the way");
2427         if (stack.size() != 1 || _phase->is_dominator(head, c)) {
2428           for (;;) {
2429             if (c->is_Region()) {
2430               stack.push(c, 1);
2431               break;
2432             } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
2433               Node* m = c->in(TypeFunc::Memory);
2434               if (m->is_MergeMem()) {
2435                 m = m->as_MergeMem()->memory_at(_alias);
2436               }
2437               assert(mem == NULL || mem == m, "several memory states");
2438               mem = m;
2439               break;
2440             } else {
2441               assert(c != c->in(0), "");
2442               c = c->in(0);
2443             }
2444           }
2445         }
2446       }
2447     } while (stack.size() > 0);
2448     assert(mem != NULL, "should have found safepoint");
2449   } else {
2450     mem = phi_mem;
2451   }
2452   return mem;
2453 }
2454 
2455 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2456   Node* c = _phase->get_ctrl(n);
2457   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2458     assert(c == n->in(0), "");
2459     CallNode* call = c->as_Call();
2460     CallProjections* projs = call->extract_projections(true, false);
2461     if (projs->catchall_memproj != NULL) {
2462       if (projs->fallthrough_memproj == n) {
2463         c = projs->fallthrough_catchproj;
2464       } else {
2465         assert(projs->catchall_memproj == n, "");
2466         c = projs->catchall_catchproj;
2467       }
2468     }
2469   }
2470   return c;
2471 }
2472 
2473 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2474   if (_phase->has_ctrl(n))
2475     return get_ctrl(n);
2476   else {
2477     assert (n->is_CFG(), "must be a CFG node");
2478     return n;
2479   }
2480 }
2481 
2482 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2483   return m != NULL && get_ctrl(m) == c;
2484 }
2485 
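// Return the memory state for this alias that is live at ctrl (for use by node n, if
// given): start from _memory_nodes[ctrl] and walk up the dominator tree until a
// memory node with a matching control is found.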
2486 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2487   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2488   assert(!ctrl->is_Call() || ctrl == n, "projection expected");
2489 #ifdef ASSERT
2490   if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
2491       (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
2492     CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
2493     int mems = 0;
2494     for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
2495       Node* u = call->fast_out(i);
2496       if (u->bottom_type() == Type::MEMORY) {
2497         mems++;
2498       }
2499     }
2500     assert(mems <= 1, "No node right after call if multiple mem projections");
2501   }
2502 #endif
2503   Node* mem = _memory_nodes[ctrl->_idx];
2504   Node* c = ctrl;
2505   while (!mem_is_valid(mem, c) &&
2506          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2507     c = _phase->idom(c);
2508     mem = _memory_nodes[c->_idx];
2509   }
2510   if (n != NULL && mem_is_valid(mem, c)) {
2511     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2512       mem = next_mem(mem, _alias);
2513     }
2514     if (mem->is_MergeMem()) {
2515       mem = mem->as_MergeMem()->memory_at(_alias);
2516     }
2517     if (!mem_is_valid(mem, c)) {
2518       do {
2519         c = _phase->idom(c);
2520         mem = _memory_nodes[c->_idx];
2521       } while (!mem_is_valid(mem, c) &&
2522                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2523     }
2524   }
2525   assert(mem->bottom_type() == Type::MEMORY, "");
2526   return mem;
2527 }
2528 
2529 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2530   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2531     Node* use = region->fast_out(i);
2532     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2533         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2534       return true;
2535     }
2536   }
2537   return false;
2538 }
2539 
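// Hook new_mem (produced at new_ctrl) into the memory graph: if ctrl already had a
// private chain of raw memory nodes, splice new_mem into that chain; otherwise walk
// the CFG below new_ctrl, create memory phis at regions that now see two memory
// states, and update loads and other users of mem that must observe new_mem.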
2540 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2541   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2542   const bool trace = false;
2543   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2544   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2545   GrowableArray<Node*> phis;
2546   if (mem_for_ctrl != mem) {
2547     Node* old = mem_for_ctrl;
2548     Node* prev = NULL;
2549     while (old != mem) {
2550       prev = old;
2551       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2552         assert(_alias == Compile::AliasIdxRaw, "");
2553         old = old->in(MemNode::Memory);
2554       } else if (old->Opcode() == Op_SCMemProj) {
2555         assert(_alias == Compile::AliasIdxRaw, "");
2556         old = old->in(0);
2557       } else {
2558         ShouldNotReachHere();
2559       }
2560     }
2561     assert(prev != NULL, "");
2562     if (new_ctrl != ctrl) {
2563       _memory_nodes.map(ctrl->_idx, mem);
2564       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2565     }
2566     uint input = (uint)MemNode::Memory;
2567     _phase->igvn().replace_input_of(prev, input, new_mem);
2568   } else {
2569     uses.clear();
2570     _memory_nodes.map(new_ctrl->_idx, new_mem);
2571     uses.push(new_ctrl);
2572     for(uint next = 0; next < uses.size(); next++ ) {
2573       Node *n = uses.at(next);
2574       assert(n->is_CFG(), "");
2575       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2576       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2577         Node* u = n->fast_out(i);
2578         if (!u->is_Root() && u->is_CFG() && u != n) {
2579           Node* m = _memory_nodes[u->_idx];
2580           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2581               !has_mem_phi(u) &&
2582               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2583             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2584             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2585 
2586             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2587               bool push = true;
2588               bool create_phi = true;
2589               if (_phase->is_dominator(new_ctrl, u)) {
2590                 create_phi = false;
2591               }
2592               if (create_phi) {
2593                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2594                 _phase->register_new_node(phi, u);
2595                 phis.push(phi);
2596                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2597                 if (!mem_is_valid(m, u)) {
2598                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2599                   _memory_nodes.map(u->_idx, phi);
2600                 } else {
2601                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2602                   for (;;) {
2603                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2604                     Node* next = NULL;
2605                     if (m->is_Proj()) {
2606                       next = m->in(0);
2607                     } else {
2608                       assert(m->is_Mem() || m->is_LoadStore(), "");
2609                       assert(_alias == Compile::AliasIdxRaw, "");
2610                       next = m->in(MemNode::Memory);
2611                     }
2612                     if (_phase->get_ctrl(next) != u) {
2613                       break;
2614                     }
2615                     if (next->is_MergeMem()) {
2616                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2617                       break;
2618                     }
2619                     if (next->is_Phi()) {
2620                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2621                       break;
2622                     }
2623                     m = next;
2624                   }
2625 
2626                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2627                   assert(m->is_Mem() || m->is_LoadStore(), "");
2628                   uint input = (uint)MemNode::Memory;
2629                   _phase->igvn().replace_input_of(m, input, phi);
2630                   push = false;
2631                 }
2632               } else {
2633                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2634               }
2635               if (push) {
2636                 uses.push(u);
2637               }
2638             }
2639           } else if (!mem_is_valid(m, u) &&
2640                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2641             uses.push(u);
2642           }
2643         }
2644       }
2645     }
2646     for (int i = 0; i < phis.length(); i++) {
2647       Node* n = phis.at(i);
2648       Node* r = n->in(0);
2649       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2650       for (uint j = 1; j < n->req(); j++) {
2651         Node* m = find_mem(r->in(j), NULL);
2652         _phase->igvn().replace_input_of(n, j, m);
2653         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2654       }
2655     }
2656   }
2657   uint last = _phase->C->unique();
2658   MergeMemNode* mm = NULL;
2659   int alias = _alias;
2660   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2661   // Process loads first so we don't miss an anti-dependency: if a store's
2662   // memory edge is updated before a load is processed, the load's
2663   // anti-dependency on that store may be missed.
2664   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2665     Node* u = mem->out(i);
2666     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2667       Node* m = find_mem(_phase->get_ctrl(u), u);
2668       if (m != mem) {
2669         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2670         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2671         --i;
2672       }
2673     }
2674   }
2675   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2676     Node* u = mem->out(i);
2677     if (u->_idx < last) {
2678       if (u->is_Mem()) {
2679         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2680           Node* m = find_mem(_phase->get_ctrl(u), u);
2681           if (m != mem) {
2682             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2683             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2684             --i;
2685           }
2686         }
2687       } else if (u->is_MergeMem()) {
2688         MergeMemNode* u_mm = u->as_MergeMem();
2689         if (u_mm->memory_at(alias) == mem) {
2690           MergeMemNode* newmm = NULL;
2691           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2692             Node* uu = u->fast_out(j);
2693             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2694             if (uu->is_Phi()) {
2695               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2696               Node* region = uu->in(0);
2697               int nb = 0;
2698               for (uint k = 1; k < uu->req(); k++) {
2699                 if (uu->in(k) == u) {
2700                   Node* m = find_mem(region->in(k), NULL);
2701                   if (m != mem) {
2702                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2703                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2704                     if (newmm != u) {
2705                       _phase->igvn().replace_input_of(uu, k, newmm);
2706                       nb++;
2707                       --jmax;
2708                     }
2709                   }
2710                 }
2711               }
2712               if (nb > 0) {
2713                 --j;
2714               }
2715             } else {
2716               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2717               if (m != mem) {
2718                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2719                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2720                 if (newmm != u) {
2721                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2722                   --j, --jmax;
2723                 }
2724               }
2725             }
2726           }
2727         }
2728       } else if (u->is_Phi()) {
2729         assert(u->bottom_type() == Type::MEMORY, "what else?");
2730         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2731           Node* region = u->in(0);
2732           bool replaced = false;
2733           for (uint j = 1; j < u->req(); j++) {
2734             if (u->in(j) == mem) {
2735               Node* m = find_mem(region->in(j), NULL);
2736               Node* nnew = m;
2737               if (m != mem) {
2738                 if (u->adr_type() == TypePtr::BOTTOM) {
2739                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2740                   nnew = mm;
2741                 }
2742                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2743                 _phase->igvn().replace_input_of(u, j, nnew);
2744                 replaced = true;
2745               }
2746             }
2747           }
2748           if (replaced) {
2749             --i;
2750           }
2751         }
2752       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2753                  u->adr_type() == NULL) {
2754         assert(u->adr_type() != NULL ||
2755                u->Opcode() == Op_Rethrow ||
2756                u->Opcode() == Op_Return ||
2757                u->Opcode() == Op_SafePoint ||
2758                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2759                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2760                u->Opcode() == Op_CallLeaf, "");
2761         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2762         if (m != mem) {
2763           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2764           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2765           --i;
2766         }
2767       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2768         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2769         if (m != mem) {
2770           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2771           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2772           --i;
2773         }
2774       } else if (u->adr_type() != TypePtr::BOTTOM &&
2775                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2776         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2777         assert(m != mem, "");
2778         // u is on the wrong slice...
2779         assert(u->is_ClearArray(), "");
2780         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2781         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2782         --i;
2783       }
2784     }
2785   }
2786 #ifdef ASSERT
2787   assert(new_mem->outcnt() > 0, "");
2788   for (int i = 0; i < phis.length(); i++) {
2789     Node* n = phis.at(i);
2790     assert(n->outcnt() > 0, "new phi must have uses now");
2791   }
2792 #endif
2793 }
2794 
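     // Builds a MergeMem based on 'mem' whose slice for _alias is replaced by
     // 'rep_proj'; the new node is registered with 'rep_ctrl' as its control.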
2795 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2796   MergeMemNode* mm = MergeMemNode::make(mem);
2797   mm->set_memory_at(_alias, rep_proj);
2798   _phase->register_new_node(mm, rep_ctrl);
2799   return mm;
2800 }
2801 
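     // Redirects the _alias slice of MergeMem 'u' to 'rep_proj'. A MergeMem with a
     // single use is patched in place; otherwise a copy with the updated slice is
     // created and registered, so callers can rewire individual users to it. The
     // DUIterator 'i' over mem's uses is adjusted when an edge to 'mem' is replaced.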
2802 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2803   MergeMemNode* newmm = NULL;
2804   MergeMemNode* u_mm = u->as_MergeMem();
2805   Node* c = _phase->get_ctrl(u);
2806   if (_phase->is_dominator(c, rep_ctrl)) {
2807     c = rep_ctrl;
2808   } else {
2809     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2810   }
2811   if (u->outcnt() == 1) {
2812     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2813       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2814       --i;
2815     } else {
2816       _phase->igvn().rehash_node_delayed(u);
2817       u_mm->set_memory_at(_alias, rep_proj);
2818     }
2819     newmm = u_mm;
2820     _phase->set_ctrl_and_loop(u, c);
2821   } else {
2822     // We can't simply clone u and then change one of its inputs: that
2823     // would add and then remove an edge, which messes with the
2824     // DUIterator.
2825     newmm = MergeMemNode::make(u_mm->base_memory());
2826     for (uint j = 0; j < u->req(); j++) {
2827       if (j < newmm->req()) {
2828         if (j == (uint)_alias) {
2829           newmm->set_req(j, rep_proj);
2830         } else if (newmm->in(j) != u->in(j)) {
2831           newmm->set_req(j, u->in(j));
2832         }
2833       } else if (j == (uint)_alias) {
2834         newmm->add_req(rep_proj);
2835       } else {
2836         newmm->add_req(u->in(j));
2837       }
2838     }
2839     if ((uint)_alias >= u->req()) {
2840       newmm->set_memory_at(_alias, rep_proj);
2841     }
2842     _phase->register_new_node(newmm, c);
2843   }
2844   return newmm;
2845 }
2846 
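     // A memory Phi is of interest if it is on the _alias slice, or if it is a
     // bottom-memory Phi whose region has no dedicated Phi for _alias (so the
     // bottom Phi carries that slice's state as well).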
2847 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2848   if (phi->adr_type() == TypePtr::BOTTOM) {
2849     Node* region = phi->in(0);
2850     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2851       Node* uu = region->fast_out(j);
2852       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2853         return false;
2854       }
2855     }
2856     return true;
2857   }
2858   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2859 }
2860 
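     // Makes the new memory state 'rep_proj', produced at 'rep_ctrl', visible to
     // the existing users of 'mem' on this alias slice that 'rep_ctrl' dominates,
     // patching MergeMems, memory Phis and bottom-memory users as needed.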
2861 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2862   uint last = _phase->C->unique();
2863   MergeMemNode* mm = NULL;
2864   assert(mem->bottom_type() == Type::MEMORY, "");
2865   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2866     Node* u = mem->out(i);
2867     if (u != replacement && u->_idx < last) {
2868       if (u->is_MergeMem()) {
2869         MergeMemNode* u_mm = u->as_MergeMem();
2870         if (u_mm->memory_at(_alias) == mem) {
2871           MergeMemNode* newmm = NULL;
2872           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2873             Node* uu = u->fast_out(j);
2874             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2875             if (uu->is_Phi()) {
2876               if (should_process_phi(uu)) {
2877                 Node* region = uu->in(0);
2878                 int nb = 0;
2879                 for (uint k = 1; k < uu->req(); k++) {
2880                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2881                     if (newmm == NULL) {
2882                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2883                     }
2884                     if (newmm != u) {
2885                       _phase->igvn().replace_input_of(uu, k, newmm);
2886                       nb++;
2887                       --jmax;
2888                     }
2889                   }
2890                 }
2891                 if (nb > 0) {
2892                   --j;
2893                 }
2894               }
2895             } else {
2896               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2897                 if (newmm == NULL) {
2898                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2899                 }
2900                 if (newmm != u) {
2901                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2902                   --j, --jmax;
2903                 }
2904               }
2905             }
2906           }
2907         }
2908       } else if (u->is_Phi()) {
2909         assert(u->bottom_type() == Type::MEMORY, "what else?");
2910         Node* region = u->in(0);
2911         if (should_process_phi(u)) {
2912           bool replaced = false;
2913           for (uint j = 1; j < u->req(); j++) {
2914             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2915               Node* nnew = rep_proj;
2916               if (u->adr_type() == TypePtr::BOTTOM) {
2917                 if (mm == NULL) {
2918                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2919                 }
2920                 nnew = mm;
2921               }
2922               _phase->igvn().replace_input_of(u, j, nnew);
2923               replaced = true;
2924             }
2925           }
2926           if (replaced) {
2927             --i;
2928           }
2929 
2930         }
2931       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2932                  u->adr_type() == NULL) {
2933         assert(u->adr_type() != NULL ||
2934                u->Opcode() == Op_Rethrow ||
2935                u->Opcode() == Op_Return ||
2936                u->Opcode() == Op_SafePoint ||
2937                u->Opcode() == Op_StoreIConditional ||
2938                u->Opcode() == Op_StoreLConditional ||
2939                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2940                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2941                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2942         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2943           if (mm == NULL) {
2944             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2945           }
2946           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2947           --i;
2948         }
2949       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2950         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2951           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2952           --i;
2953         }
2954       }
2955     }
2956   }
2957 }
2958 
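     // The barrier node keeps a load reference barrier explicit in the ideal graph;
     // registering it with the barrier set state lets the expansion pass find every
     // barrier that still needs to be lowered.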
2959 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
2960 : Node(ctrl, obj), _decorators(decorators) {
2961   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2962 }
2963 
2964 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
2965   return _decorators;
2966 }
2967 
2968 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2969   return sizeof(*this);
2970 }
2971 
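     // Only the decorator bits that matter for the barrier's behavior (reference
     // strength and IN_NATIVE) take part in hashing and comparison, so barriers
     // that differ only in other decorators can still be commoned by GVN.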
2972 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2973   return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2974 }
2975 
2976 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2977   uint hash = Node::hash();
2978   hash += mask_decorators(_decorators);
2979   return hash;
2980 }
2981 
2982 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
2983   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2984          mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2985 }
2986 
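     // The barrier's type follows its input value: a strong access never turns a
     // non-null reference into NULL, while weak/phantom accesses may return NULL,
     // so their type is widened with TypePtr::NULL_PTR.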
2987 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2988   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
2989     return Type::TOP;
2990   }
2991   const Type* t = in(ValueIn)->bottom_type();
2992   if (t == TypePtr::NULL_PTR) {
2993     return t;
2994   }
2995 
2996   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2997     return t;
2998   }
2999 
3000   return t->meet(TypePtr::NULL_PTR);
3001 }
3002 
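     // Same typing rule as bottom_type(), applied to the phase's current (possibly
     // narrower) type of the input value.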
3003 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3004   // If the input value is TOP, the result is TOP.
3005   const Type* t2 = phase->type(in(ValueIn));
3006   if (t2 == Type::TOP) return Type::TOP;
3007 
3008   if (t2 == TypePtr::NULL_PTR) {
3009     return t2;
3010   }
3011 
3012   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
3013     return t2;
3014   }
3015 
3016   return t2->meet(TypePtr::NULL_PTR);
3017 }
3018 
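     // The barrier collapses to its input when the value provably needs no barrier
     // (for example a fresh allocation, a constant or NULL).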
3019 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3020   Node* value = in(ValueIn);
3021   if (!needs_barrier(phase, value)) {
3022     return value;
3023   }
3024   return this;
3025 }
3026 
3027 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3028   Unique_Node_List visited;
3029   return needs_barrier_impl(phase, n, visited);
3030 }
3031 
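     // Walks the value's defining nodes to decide whether a barrier is required.
     // Nodes whose results are known not to need one (new allocations, call results,
     // NULL, constants, other barriers, incoming parameters) return false; loads and
     // unrecognized shapes conservatively return true.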
3032 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
3033   if (n == NULL) return false;
3034   if (visited.member(n)) {
3035     return false; // Been there.
3036   }
3037   visited.push(n);
3038 
3039   if (n->is_Allocate()) {
3040     // tty->print_cr("optimize barrier on alloc");
3041     return false;
3042   }
3043   if (n->is_Call()) {
3044     // tty->print_cr("optimize barrier on call");
3045     return false;
3046   }
3047 
3048   const Type* type = phase->type(n);
3049   if (type == Type::TOP) {
3050     return false;
3051   }
3052   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
3053     // tty->print_cr("optimize barrier on null");
3054     return false;
3055   }
3056   if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
3057     // tty->print_cr("optimize barrier on constant");
3058     return false;
3059   }
3060 
3061   switch (n->Opcode()) {
3062     case Op_AddP:
3063       return true; // TODO: Can refine?
3064     case Op_LoadP:
3065     case Op_ShenandoahCompareAndExchangeN:
3066     case Op_ShenandoahCompareAndExchangeP:
3067     case Op_CompareAndExchangeN:
3068     case Op_CompareAndExchangeP:
3069     case Op_GetAndSetN:
3070     case Op_GetAndSetP:
3071       return true;
3072     case Op_Phi: {
3073       for (uint i = 1; i < n->req(); i++) {
3074         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
3075       }
3076       return false;
3077     }
3078     case Op_CheckCastPP:
3079     case Op_CastPP:
3080       return needs_barrier_impl(phase, n->in(1), visited);
3081     case Op_Proj:
3082       return needs_barrier_impl(phase, n->in(0), visited);
3083     case Op_ShenandoahLoadReferenceBarrier:
3084       // tty->print_cr("optimize barrier on barrier");
3085       return false;
3086     case Op_Parm:
3087       // tty->print_cr("optimize barrier on input arg");
3088       return false;
3089     case Op_DecodeN:
3090     case Op_EncodeP:
3091       return needs_barrier_impl(phase, n->in(1), visited);
3092     case Op_LoadN:
3093       return true;
3094     case Op_CMoveN:
3095     case Op_CMoveP:
3096       return needs_barrier_impl(phase, n->in(2), visited) ||
3097              needs_barrier_impl(phase, n->in(3), visited);
3098     case Op_ShenandoahIUBarrier:
3099       return needs_barrier_impl(phase, n->in(1), visited);
3100     case Op_CreateEx:
3101       return false;
3102     default:
3103       break;
3104   }
3105 #ifdef ASSERT
3106   tty->print("need barrier on?: ");
3107   tty->print_cr("ins:");
3108   n->dump(2);
3109   tty->print_cr("outs:");
3110   n->dump(-2);
3111   ShouldNotReachHere();
3112 #endif
3113   return true;
3114 }