/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shenandoah/shenandoahForwarding.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

#ifdef _LP64
#define LoadXNode LoadLNode
#else
#define LoadXNode LoadINode
#endif

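// Expansion entry point: if any load reference barriers were recorded for
// this compilation, clear major progress, run a PhaseIdealLoop pass over the
// graph and verify the result (raw memory verification in debug builds).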
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  if (C->shenandoah_barriers_count() > 0) {
    C->clear_major_progress();
    PhaseIdealLoop ideal_loop(igvn, false, true);
    if (C->failing()) return false;
    PhaseIdealLoop::verify(igvn);
    DEBUG_ONLY(verify_raw_mem(C->root());)
  }
  return true;
}

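// Matches the shape of the gc-state test emitted by test_gc_state() below:
// if ((LoadB(ThreadLocal + gc_state_offset) & mask) != 0).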
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

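// Matches a LoadB/LoadUB of the thread-local gc_state field, i.e. a load from
// an AddP of ThreadLocal and JavaThread::gc_state_offset().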
bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(JavaThread::gc_state_offset())) {
    return false;
  }
  return true;
}

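// Walks control flow backwards from start towards stop and reports whether a
// (non leaf-call) safepoint can be encountered on any path in between.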
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

#ifdef ASSERT
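// Debug-only helper for verify(): follows the input chain of an oop value
// through casts, AddPs, phis and CMoves, and checks that every path ends at
// something that needs no barrier (constant, parameter, new allocation, ...)
// or at a ShenandoahLoadReferenceBarrier.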
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("NULL");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) {
      if (trace) {tty->print_cr("Java mirror");}
    } else {
      if (in->is_ConstraintCast() || in->Opcode() == Op_CheckCastPP) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != NULL) {
    n1->dump(+10);
  }
  if (n2 != NULL) {
    n2->dump(+10);
  }
  fatal(err_msg("%s", msg));
}

static const char* call_name(CallNode* call) {
  if (call->is_CallRuntime()) {
    return call->as_CallRuntime()->_name;
  }
  if (call->is_CallStaticJava()) {
    return call->as_CallStaticJava()->_name;
  }
  return NULL;
}

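// Debug-only graph verification: walks the graph from the root and checks
// that every oop use that requires a barrier (loads, stores, compares,
// intrinsic calls, ...) is covered by a ShenandoahLoadReferenceBarrier.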
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited(Thread::current()->resource_area());
  const bool trace = true;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(JavaThread::satb_mark_queue_offset()) + in_bytes(PtrQueue::byte_offset_of_buf())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.Reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
      };

      if (call->is_CallRuntime() && call->as_CallRuntime()->is_call_to_arraycopystub()) {
        Node* dest = NULL;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call_name(call)) > 5 &&
                 !strcmp(call_name(call) + strlen(call_name(call)) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call_name(call), "g1_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call_name(call))) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal(err_msg("arg %d for call %s not covered", j, call_name(call)));
              }
            }
          }
        } else {
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal(err_msg("%s not covered", call_name(call)));
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_CheckCastPP
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
        Op_AryEq,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_CastP2X,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_ClearArray,
        { { 3, ShenandoahStore },                 { -1, ShenandoahNone} },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal(err_msg("arg %d for node %s not covered", j, n->Name()));
            }
          }
        }
      } else {
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal(err_msg("%s not covered", n->Name()));
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.Reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination; verify that there is no path from d to n.
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      if (m->is_Store() || m->is_LoadStore()) {
        // Take anti-dependencies into account
        Node* mem = m->in(MemNode::Memory);
        for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
          Node* u = mem->fast_out(i);
          if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
              phase->ctrl_or_self(u) == c) {
            wq.push(u);
          }
        }
      }
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

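// Steps from a memory node to its input memory state for the given alias
// class; used when walking the memory graph upwards.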
Node* next_mem(Node* mem, int alias) {
  Node* res = NULL;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

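// Follows the idom chain from c up to dom and checks for control flow
// branches in between: returns NULL if there is none, the single tolerated
// If projection when allow_one_proj is set, and NodeSentinel for anything
// unsupported.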
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = NULL;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == NULL) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

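// Walks the memory graph up from mem until it reaches a state whose control
// dominates ctrl; returns NULL if the walk cycles back on itself.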
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq(Thread::current()->resource_area());
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return NULL;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

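// Finds the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking up
// the dominator tree and inspecting memory phis and projections.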
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = NULL;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      Node* phi_bottom = NULL;
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != NULL) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == NULL) {
              mem = projs.fallthrough_memproj;
            } else {
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != NULL &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == NULL, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == NULL);
  return mem;
}

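// Collects the data uses of n whose control is ctrl, so they can be revisited
// once the barrier's control has changed.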
void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

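// Emits the (thread->gc_state() & flags) != 0 test: ctrl is replaced by the
// projection taken when some of the flags are set, test_fail_ctrl receives
// the projection taken when none are.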
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread          = new (phase->C) ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(JavaThread::gc_state_offset()));
  Node* gc_state_addr   = new (phase->C) AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state        = new (phase->C) LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                                   DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
                                                   TypeInt::BYTE, MemNode::unordered);

  Node* gc_state_and    = new (phase->C) AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp    = new (phase->C) CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool   = new (phase->C) BoolNode(gc_state_cmp, BoolTest::ne);

  IfNode* gc_state_iff  = new (phase->C) IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                  = new (phase->C) IfTrueNode(gc_state_iff);
  test_fail_ctrl        = new (phase->C) IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff,   loop, old_ctrl);
  phase->register_control(ctrl,           loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread,        old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state,      old_ctrl);
  phase->register_new_node(gc_state_and,  old_ctrl);
  phase->register_new_node(gc_state_cmp,  old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  phase->set_ctrl(gc_state_offset, phase->C->root());
  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}

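// Emits an explicit null check for val when its type includes NULL: ctrl
// becomes the not-null projection and null_ctrl the null one. Does nothing
// for provably non-null values.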
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp   = new (phase->C) CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test  = new (phase->C) BoolNode(null_cmp, BoolTest::ne);

    IfNode* null_iff = new (phase->C) IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl             = new (phase->C) IfTrueNode(null_iff);
    null_ctrl        = new (phase->C) IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff,  loop, old_ctrl);
    phase->register_control(ctrl,      loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp,  old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}

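// Clones the null check (If plus non-null CastPP) that guards unc_ctrl so it
// can be re-executed at control c: c becomes the new not-null projection and
// the returned node is a fresh CastPP of the unchecked value pinned there.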
Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new (phase->C) IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new (phase->C) IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  val = new (phase->C) CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}

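// After a null check has been cloned, rewires the uncommon trap (or the
// region that merges into it) and everything it controls over to
// new_unc_ctrl.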
void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  for (uint next = 0; next < uses.size(); next++) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}

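// Emits the in-collection-set test: indexes the cset map with the region
// number of val (address >> region_size_bytes_shift) using raw pointer
// arithmetic, loads the byte and branches on it being non-zero. ctrl becomes
// the in-cset projection, not_cset_ctrl the other one.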
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  Node* raw_val        = new (phase->C) CastP2XNode(old_ctrl, val);
  Node* cset_idx       = new (phase->C) URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr      = new (phase->C) CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new (phase->C) AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr  = new (phase->C) CastX2PNode(cset_load_addr);

  Node* cset_load      = new (phase->C) LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
                                                  TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp       = new (phase->C) CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool      = new (phase->C) BoolNode(cset_cmp, BoolTest::ne);

  IfNode* cset_iff     = new (phase->C) IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                 = new (phase->C) IfTrueNode(cset_iff);
  not_cset_ctrl        = new (phase->C) IfFalseNode(cset_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff,      loop, old_ctrl);
  phase->register_control(ctrl,          loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  phase->set_ctrl(cset_addr_ptr, phase->C->root());

  phase->register_new_node(raw_val,        old_ctrl);
  phase->register_new_node(cset_idx,       old_ctrl);
  phase->register_new_node(cset_addr,      old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr,  old_ctrl);
  phase->register_new_node(cset_load,      old_ctrl);
  phase->register_new_node(cset_cmp,       old_ctrl);
  phase->register_new_node(cset_bool,      old_ctrl);
}

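// Emits the slow-path call to the load reference barrier runtime stub on the
// current ctrl and replaces val with the call result, checkcast back to the
// original oop type. result_mem receives the call's memory projection.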
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);
  MergeMemNode* mm = MergeMemNode::make(phase->C, base);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);

  Node* call = new (phase->C) CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(),
                                           target,
                                           "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new (phase->C) ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new (phase->C) ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new (phase->C) ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new (phase->C) CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

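// After a barrier has been expanded, rewires nodes that were control
// dependent on the barrier's original control to region (the merge of the
// fast and slow paths), leaving the incoming raw memory above the barrier.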
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

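// For a node n that has been cloned across a call's fallthrough/catchall
// paths, builds the phis (or cloned Bool/Cmp chains) that merge n and n_clone
// at the regions encountered between c and ctrl.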
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = NULL;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != NULL, "");
  if (n->is_Bool()) {
    Node* bol_clone = n->clone();
    n = n->in(1);
    n_clone = n_clone->in(1);
    assert(n->is_Cmp() && n_clone->is_Cmp(), "should be cmp");
    Node* cmp_clone = n->clone();
    bol_clone->set_req(1, cmp_clone);
    if (n->in(1) != n_clone->in(1)) {
      cmp_clone->set_req(1, create_phis_on_call_return(ctrl, region, n->in(1), n_clone->in(1), projs, phase));
    }
    if (n->in(2) != n_clone->in(2)) {
      cmp_clone->set_req(2, create_phis_on_call_return(ctrl, region, n->in(2), n_clone->in(2), projs, phase));
    }
    phase->register_new_node(cmp_clone, region);
    phase->register_new_node(bol_clone, region);
    return bol_clone;
  }
  Node* phi = new (phase->C) PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

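// Main expansion driver: for every non-redundant load reference barrier, pin
// it at a suitable control point (possibly cloning it across null checks and
// call projections) and expand it into the explicit gc-state / cset tests and
// the runtime call.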
1132 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1133 
1134   Unique_Node_List uses;
1135   Node_Stack stack(0);
1136   Node_List clones;
1137   for (int i = phase->C->shenandoah_barriers_count() - 1; i >= 0; i--) {
1138     ShenandoahLoadReferenceBarrierNode* lrb = phase->C->shenandoah_barrier(i);
1139     if (lrb->is_redundant()) {
1140       continue;
1141     }
1142 
1143     Node* ctrl = phase->get_ctrl(lrb);
1144     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1145 
1146     CallStaticJavaNode* unc = NULL;
1147     Node* unc_ctrl = NULL;
1148     Node* uncasted_val = val;
1149 
1150     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1151       Node* u = lrb->fast_out(i);
1152       if (u->Opcode() == Op_CastPP &&
1153           u->in(0) != NULL &&
1154           phase->is_dominator(u->in(0), ctrl)) {
1155         const Type* u_t = phase->igvn().type(u);
1156 
1157         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1158             u->in(0)->Opcode() == Op_IfTrue &&
1159             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1160             u->in(0)->in(0)->is_If() &&
1161             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1162             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1163             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1164             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1165             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1166           IdealLoopTree* loop = phase->get_loop(ctrl);
1167           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1168 
1169           if (!unc_loop->is_member(loop)) {
1170             continue;
1171           }
1172 
1173           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1174           assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1175           if (branch == NodeSentinel) {
1176             continue;
1177           }
1178 
1179           phase->igvn().replace_input_of(u, 1, val);
1180           phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1181           phase->set_ctrl(u, u->in(0));
1182           phase->set_ctrl(lrb, u->in(0));
1183           unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1184           unc_ctrl = u->in(0);
1185           val = u;
1186 
1187           for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1188             Node* u = val->fast_out(j);
1189             if (u == lrb) continue;
1190             phase->igvn().rehash_node_delayed(u);
1191             int nb = u->replace_edge(val, lrb);
1192             --j; jmax -= nb;
1193           }
1194 
1195           RegionNode* r = new (phase->C) RegionNode(3);
1196           IfNode* iff = unc_ctrl->in(0)->as_If();
1197 
1198           Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1199           Node* unc_ctrl_clone = unc_ctrl->clone();
1200           phase->register_control(unc_ctrl_clone, loop, iff);
1201           Node* c = unc_ctrl_clone;
1202           Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1203           r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1204 
1205           phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1206           phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1207           phase->lazy_replace(c, unc_ctrl);
1208           c = NULL;;
1209           phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1210           phase->set_ctrl(val, unc_ctrl_clone);
1211 
1212           IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1213           fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1214           Node* iff_proj = iff->proj_out(0);
1215           r->init_req(2, iff_proj);
1216           phase->register_control(r, phase->ltree_root(), iff);
1217 
1218           Node* new_bol = new_iff->in(1)->clone();
1219           Node* new_cmp = new_bol->in(1)->clone();
1220           assert(new_cmp->Opcode() == Op_CmpP, "broken");
1221           assert(new_cmp->in(1) == val->in(1), "broken");
1222           new_bol->set_req(1, new_cmp);
1223           new_cmp->set_req(1, lrb);
1224           phase->register_new_node(new_bol, new_iff->in(0));
1225           phase->register_new_node(new_cmp, new_iff->in(0));
1226           phase->igvn().replace_input_of(new_iff, 1, new_bol);
1227           phase->igvn().replace_input_of(new_cast, 1, lrb);
1228 
1229           for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1230             Node* u = lrb->fast_out(i);
1231             if (u == new_cast || u == new_cmp) {
1232               continue;
1233             }
1234             phase->igvn().rehash_node_delayed(u);
1235             int nb = u->replace_edge(lrb, new_cast);
1236             assert(nb > 0, "no update?");
1237             --i; imax -= nb;
1238           }
1239 
1240           for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1241             Node* u = val->fast_out(i);
1242             if (u == lrb) {
1243               continue;
1244             }
1245             phase->igvn().rehash_node_delayed(u);
1246             int nb = u->replace_edge(val, new_cast);
1247             assert(nb > 0, "no update?");
1248             --i; imax -= nb;
1249           }
1250 
1251           ctrl = unc_ctrl_clone;
1252           phase->set_ctrl_and_loop(lrb, ctrl);
1253           break;
1254         }
1255       }
1256     }
1257     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1258       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1259       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1260         // The rethrow call may have too many projections to be
1261         // properly handled here. Given there's no reason for a
1262         // barrier to depend on the call, move it above the call.
1263         if (phase->get_ctrl(val) == ctrl) {
1264           assert(val->Opcode() == Op_DecodeN, "unexpected node");
1265           assert(phase->is_dominator(phase->get_ctrl(val->in(1)), call->in(0)), "Load is too low");
1266           phase->set_ctrl(val, call->in(0));
1267         }
1268         phase->set_ctrl(lrb, call->in(0));
1269         continue;
1270       }
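           // The barrier is pinned at a Java call: clone the barrier and the
           // non-CFG nodes hanging off it so that one copy lands on the
           // fall-through (normal return) path and one on the catch-all
           // (exception) path, then rewire their uses to whichever copy
           // dominates them, creating phis on the call return where neither
           // copy does.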
1271       CallProjections projs;
1272       call->extract_projections(&projs, false, false);
1273 
1274 #ifdef ASSERT
1275       VectorSet cloned(Thread::current()->resource_area());
1276 #endif
1277       Node* lrb_clone = lrb->clone();
1278       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1279       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1280 
1281       stack.push(lrb, 0);
1282       clones.push(lrb_clone);
1283 
1284       do {
1285         assert(stack.size() == clones.size(), "");
1286         Node* n = stack.node();
1287 #ifdef ASSERT
1288         if (n->is_Load()) {
1289           Node* mem = n->in(MemNode::Memory);
1290           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1291             Node* u = mem->fast_out(j);
1292             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1293           }
1294         }
1295 #endif
1296         uint idx = stack.index();
1297         Node* n_clone = clones.at(clones.size()-1);
1298         if (idx < n->outcnt()) {
1299           Node* u = n->raw_out(idx);
1300           Node* c = phase->ctrl_or_self(u);
1301           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1302             stack.set_index(idx+1);
1303             assert(!u->is_CFG(), "");
1304             stack.push(u, 0);
1305             assert(!cloned.test_set(u->_idx), "only one clone");
1306             Node* u_clone = u->clone();
1307             int nb = u_clone->replace_edge(n, n_clone);
1308             assert(nb > 0, "should have replaced some uses");
1309             phase->register_new_node(u_clone, projs.catchall_catchproj);
1310             clones.push(u_clone);
1311             phase->set_ctrl(u, projs.fallthrough_catchproj);
1312           } else {
1313             bool replaced = false;
1314             if (u->is_Phi()) {
1315               for (uint k = 1; k < u->req(); k++) {
1316                 if (u->in(k) == n) {
1317                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1318                     phase->igvn().replace_input_of(u, k, n_clone);
1319                     replaced = true;
1320                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1321                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1322                     replaced = true;
1323                   }
1324                 }
1325               }
1326             } else {
1327               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1328                 phase->igvn().rehash_node_delayed(u);
1329                 int nb = u->replace_edge(n, n_clone);
1330                 assert(nb > 0, "should have replaced some uses");
1331                 replaced = true;
1332               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1333                 if (u->is_If()) {
1334                   // Can't break If/Bool/Cmp chain
1335                   assert(n->is_Bool(), "unexpected If shape");
1336                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1337                   assert(n_clone->is_Bool(), "unexpected clone");
1338                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1339                   Node* bol_clone = n->clone();
1340                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1341                   bol_clone->set_req(1, cmp_clone);
1342 
1343                   Node* nn = stack.node_at(stack.size()-3);
1344                   Node* nn_clone = clones.at(clones.size()-3);
1345                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1346 
1347                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase));
1348                   assert(nb > 0, "should have replaced some uses");
1349 
1350                   phase->register_new_node(bol_clone, u->in(0));
1351                   phase->register_new_node(cmp_clone, u->in(0));
1352 
1353                   phase->igvn().replace_input_of(u, 1, bol_clone);
1354 
1355                 } else {
1356                   phase->igvn().rehash_node_delayed(u);
1357                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1358                   assert(nb > 0, "should have replaced some uses");
1359                 }
1360                 replaced = true;
1361               }
1362             }
1363             if (!replaced) {
1364               stack.set_index(idx+1);
1365             }
1366           }
1367         } else {
1368           stack.pop();
1369           clones.pop();
1370         }
1371       } while (stack.size() > 0);
1372       assert(stack.size() == 0 && clones.size() == 0, "");
1373     }
1374   }
1375 
1376   // Expand load-reference-barriers
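       // Each barrier below is rewritten into explicit control flow. A rough
       // sketch of the expanded shape (not the exact IR; the null and
       // not-in-cset tests may be elided when they are known to fold):
       //
       //   if (gc_state & HAS_FORWARDED) {        // heap is not stable
       //     if (obj != NULL) {                   // else take _null_path
       //       if (in_collection_set(obj)) {      // else take _not_cset
       //         obj = call_lrb_stub(obj, addr);  // _evac_path: runtime call
       //       }
       //     }
       //   }
       //   // region + val_phi/raw_mem_phi merge the value and raw memory
       //   // coming out of the _heap_stable, _null_path, _not_cset and
       //   // _evac_path branches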
1377   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1378   Unique_Node_List uses_to_ignore;
1379   for (int i = phase->C->shenandoah_barriers_count() - 1; i >= 0; i--) {
1380     ShenandoahLoadReferenceBarrierNode* lrb = phase->C->shenandoah_barrier(i);
1381     if (lrb->is_redundant()) {
1382       phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1383       continue;
1384     }
1385     uint last = phase->C->unique();
1386     Node* ctrl = phase->get_ctrl(lrb);
1387     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1388 
1389 
1390     Node* orig_ctrl = ctrl;
1391 
1392     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1393     Node* init_raw_mem = raw_mem;
1394     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1395 
1396     IdealLoopTree *loop = phase->get_loop(ctrl);
1397     CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1398     Node* unc_ctrl = NULL;
1399     if (unc != NULL) {
1400       if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1401         unc = NULL;
1402       } else {
1403         unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1404       }
1405     }
1406 
1407     Node* uncasted_val = val;
1408     if (unc != NULL) {
1409       uncasted_val = val->in(1);
1410     }
1411 
1412     Node* heap_stable_ctrl = NULL;
1413     Node* null_ctrl = NULL;
1414 
1415     assert(val->bottom_type()->make_oopptr(), "need oop");
1416     assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1417 
1418     enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
1419     Node* region = new (phase->C) RegionNode(PATH_LIMIT);
1420     Node* val_phi = new (phase->C) PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1421     Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1422 
1423     // Stable path.
1424     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
1425     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1426 
1427     // Heap stable case
1428     region->init_req(_heap_stable, heap_stable_ctrl);
1429     val_phi->init_req(_heap_stable, uncasted_val);
1430     raw_mem_phi->init_req(_heap_stable, raw_mem);
1431 
1432     Node* reg2_ctrl = NULL;
1433     // Null case
1434     test_null(ctrl, val, null_ctrl, phase);
1435     if (null_ctrl != NULL) {
1436       reg2_ctrl = null_ctrl->in(0);
1437       region->init_req(_null_path, null_ctrl);
1438       val_phi->init_req(_null_path, uncasted_val);
1439       raw_mem_phi->init_req(_null_path, raw_mem);
1440     } else {
1441       region->del_req(_null_path);
1442       val_phi->del_req(_null_path);
1443       raw_mem_phi->del_req(_null_path);
1444     }
1445 
1446     // Test for in-cset.
1447     // Wires !in_cset(obj) to slot 2 of region and phis
1448     Node* not_cset_ctrl = NULL;
1449     test_in_cset(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1450     if (not_cset_ctrl != NULL) {
1451       if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1452       region->init_req(_not_cset, not_cset_ctrl);
1453       val_phi->init_req(_not_cset, uncasted_val);
1454       raw_mem_phi->init_req(_not_cset, raw_mem);
1455     }
1456 
1457     // Resolve object when orig-value is in cset.
1458     // Make the unconditional resolve for fwdptr.
1459     Node* new_val = uncasted_val;
1460     if (unc_ctrl != NULL) {
1461       // Clone the null check in this branch to allow implicit null check
1462       new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1463       fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1464 
1465       IfNode* iff = unc_ctrl->in(0)->as_If();
1466       phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1467     }
1468 
1469     // Call the lrb-stub and wire that path into the _evac_path slot of the region and phis.
1470     Node* result_mem = NULL;
1471 
1472     Node* fwd = new_val;
1473     Node* addr;
1474     if (ShenandoahSelfFixing) {
1475       VectorSet visited(Thread::current()->resource_area());
1476       addr = get_load_addr(phase, visited, lrb);
1477     } else {
1478       addr = phase->igvn().zerocon(T_OBJECT);
1479     }
1480     if (addr->Opcode() == Op_AddP) {
1481       Node* orig_base = addr->in(AddPNode::Base);
1482       Node* base = new (phase->C) CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type());
1483       phase->register_new_node(base, ctrl);
1484     if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1485         // Field access
1486         addr = addr->clone();
1487         addr->set_req(AddPNode::Base, base);
1488         addr->set_req(AddPNode::Address, base);
1489         phase->register_new_node(addr, ctrl);
1490       } else {
1491         Node* addr2 = addr->in(AddPNode::Address);
1492         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1493               addr2->in(AddPNode::Base) == orig_base) {
1494           addr2 = addr2->clone();
1495           addr2->set_req(AddPNode::Base, base);
1496           addr2->set_req(AddPNode::Address, base);
1497           phase->register_new_node(addr2, ctrl);
1498           addr = addr->clone();
1499           addr->set_req(AddPNode::Base, base);
1500           addr->set_req(AddPNode::Address, addr2);
1501           phase->register_new_node(addr, ctrl);
1502         }
1503       }
1504     }
1505     call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, false, phase);
1506     region->init_req(_evac_path, ctrl);
1507     val_phi->init_req(_evac_path, fwd);
1508     raw_mem_phi->init_req(_evac_path, result_mem);
1509 
1510     phase->register_control(region, loop, heap_stable_iff);
1511     Node* out_val = val_phi;
1512     phase->register_new_node(val_phi, region);
1513     phase->register_new_node(raw_mem_phi, region);
1514 
1515     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1516 
1517     ctrl = orig_ctrl;
1518 
1519     if (unc != NULL) {
1520       for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1521         Node* u = val->fast_out(i);
1522         Node* c = phase->ctrl_or_self(u);
1523         if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1524           phase->igvn().rehash_node_delayed(u);
1525           int nb = u->replace_edge(val, out_val);
1526           --i; imax -= nb;
1527         }
1528       }
1529       if (val->outcnt() == 0) {
1530         phase->igvn()._worklist.push(val);
1531       }
1532     }
1533     phase->igvn().replace_node(lrb, out_val);
1534 
1535     follow_barrier_uses(out_val, ctrl, uses, phase);
1536 
1537     for (uint next = 0; next < uses.size(); next++) {
1538       Node *n = uses.at(next);
1539       assert(phase->get_ctrl(n) == ctrl, "bad control");
1540       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1541       phase->set_ctrl(n, region);
1542       follow_barrier_uses(n, ctrl, uses, phase);
1543     }
1544 
1545     // The slow path call produces memory: hook the raw memory phi
1546     // from the expanded load reference barrier into the rest of the graph,
1547     // which may require adding memory phis at every post-dominated
1548     // region and at enclosing loop heads. Use the memory state
1549     // collected in memory_nodes to fix the memory graph, updating that
1550     // memory state as we go.
1551     fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1552   }
1553   // Done expanding load-reference-barriers.
1554   assert(phase->C->shenandoah_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1555 
1556 }
1557 
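     // Walk back through the value graph to recover the address the barrier's
     // value was loaded from, for use by the self-fixing slow path. Nodes that
     // were already visited return NULL; anything unknown or ambiguous (calls,
     // constants, conflicting Phi/CMove inputs, ...) yields a null constant,
     // i.e. no usable load address.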
1558 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1559   if (visited.test_set(in->_idx)) {
1560     return NULL;
1561   }
1562   switch (in->Opcode()) {
1563     case Op_Proj:
1564       return get_load_addr(phase, visited, in->in(0));
1565     case Op_CastPP:
1566     case Op_CheckCastPP:
1567     case Op_DecodeN:
1568     case Op_EncodeP:
1569       return get_load_addr(phase, visited, in->in(1));
1570     case Op_LoadN:
1571     case Op_LoadP:
1572       return in->in(MemNode::Address);
1573     case Op_GetAndSetN:
1574     case Op_GetAndSetP:
1575       // Those instructions would just have stored a different
1576       // value into the field. There is no point trying to fix it here.
1577       return phase->igvn().zerocon(T_OBJECT);
1578     case Op_CMoveP:
1579     case Op_CMoveN: {
1580       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1581       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1582       // Handle unambiguous cases: single address reported on both branches.
1583       if (t != NULL && f == NULL) return t;
1584       if (t == NULL && f != NULL) return f;
1585       if (t != NULL && t == f)    return t;
1586       // Ambiguity.
1587       return phase->igvn().zerocon(T_OBJECT);
1588     }
1589     case Op_Phi: {
1590       Node* addr = NULL;
1591       for (uint i = 1; i < in->req(); i++) {
1592         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1593         if (addr == NULL) {
1594           addr = addr1;
1595         }
1596         if (addr != addr1) {
1597           return phase->igvn().zerocon(T_OBJECT);
1598         }
1599       }
1600       return addr;
1601     }
1602     case Op_ShenandoahLoadReferenceBarrier:
1603       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1604     case Op_CallDynamicJava:
1605     case Op_CallLeaf:
1606     case Op_CallStaticJava:
1607     case Op_ConN:
1608     case Op_ConP:
1609     case Op_Parm:
1610     case Op_CreateEx:
1611       return phase->igvn().zerocon(T_OBJECT);
1612     default:
1613 #ifdef ASSERT
1614       fatal(err_msg("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]));
1615 #endif
1616       return phase->igvn().zerocon(T_OBJECT);
1617   }
1618 
1619 }
1620 
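     // Make the gc-state test loop invariant: when the gc-state load is not
     // already pinned above the loop, clone the load/AndI/CmpI/Bool chain at
     // the loop entry (feeding it a memory state that dominates the loop head)
     // and rewire the If to the cloned condition. The If itself stays where it
     // is; only its condition moves.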
1621 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1622   IdealLoopTree *loop = phase->get_loop(iff);
1623   Node* loop_head = loop->_head;
1624   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1625 
1626   Node* bol = iff->in(1);
1627   Node* cmp = bol->in(1);
1628   Node* andi = cmp->in(1);
1629   Node* load = andi->in(1);
1630 
1631   assert(is_gc_state_load(load), "broken");
1632   if (!phase->is_dominator(load->in(0), entry_c)) {
1633     Node* mem_ctrl = NULL;
1634     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1635     load = load->clone();
1636     load->set_req(MemNode::Memory, mem);
1637     load->set_req(0, entry_c);
1638     phase->register_new_node(load, entry_c);
1639     andi = andi->clone();
1640     andi->set_req(1, load);
1641     phase->register_new_node(andi, entry_c);
1642     cmp = cmp->clone();
1643     cmp->set_req(1, andi);
1644     phase->register_new_node(cmp, entry_c);
1645     bol = bol->clone();
1646     bol->set_req(1, cmp);
1647     phase->register_new_node(bol, entry_c);
1648 
1649     Node* old_bol = iff->in(1);
1650     phase->igvn().replace_input_of(iff, 1, bol);
1651   }
1652 }
1653 
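     // Recognize a heap-stable test sitting right behind an identical one:
     // n's region must be immediately dominated by an If that is also a
     // heap-stable test, and every input of that region must be dominated by
     // one of the dominating If's projections. merge_back_to_back_tests()
     // relies on exactly this shape.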
1654 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1655   if (!n->is_If() || n->is_CountedLoopEnd()) {
1656     return false;
1657   }
1658   Node* region = n->in(0);
1659 
1660   if (!region->is_Region()) {
1661     return false;
1662   }
1663   Node* dom = phase->idom(region);
1664   if (!dom->is_If()) {
1665     return false;
1666   }
1667 
1668   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1669     return false;
1670   }
1671 
1672   IfNode* dom_if = dom->as_If();
1673   Node* proj_true = dom_if->proj_out(1);
1674   Node* proj_false = dom_if->proj_out(0);
1675 
1676   for (uint i = 1; i < region->req(); i++) {
1677     if (phase->is_dominator(proj_true, region->in(i))) {
1678       continue;
1679     }
1680     if (phase->is_dominator(proj_false, region->in(i))) {
1681       continue;
1682     }
1683     return false;
1684   }
1685 
1686   return true;
1687 }
1688 
1689 static bool merge_point_too_heavy(Compile* C, Node* region) {
1690   // Bail out if the region and its phis have too many users.
1691   int weight = 0;
1692   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1693     weight += region->fast_out(i)->outcnt();
1694   }
1695   int nodes_left = C->max_node_limit() - C->live_nodes();
1696   if (weight * 8 > nodes_left) {
1697 #ifndef PRODUCT
1698     if (PrintOpto) {
1699       tty->print_cr("*** Split-if bails out:  %d nodes, region weight %d", C->unique(), weight);
1700     }
1701 #endif
1702     return true;
1703   } else {
1704     return false;
1705   }
1706 }
1707 
1708 static bool merge_point_safe(Node* region) {
1709   // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1710   // having a PhiNode input. This sidesteps the dangerous case where the split
1711   // ConvI2LNode may become TOP if the input Value() does not
1712   // overlap the ConvI2L range, leaving a node which may not dominate its
1713   // uses.
1714   // A better fix for this problem can be found in the BugTraq entry, but
1715   // expediency for Mantis demands this hack.
1716   // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
1717   // split_if_with_blocks from splitting a block because we could not move around
1718   // the FastLockNode.
1719   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1720     Node* n = region->fast_out(i);
1721     if (n->is_Phi()) {
1722       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1723         Node* m = n->fast_out(j);
1724         if (m->is_FastLock())
1725           return false;
1726 #ifdef _LP64
1727         if (m->Opcode() == Op_ConvI2L)
1728           return false;
1729         if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
1730           return false;
1731         }
1732 #endif
1733       }
1734     }
1735   }
1736   return true;
1737 }
1738 
1739 static bool can_split_if(PhaseIdealLoop* phase, Node* n_ctrl) {
1740   if (phase->C->live_nodes() > 35000) {
1741     return false; // Method too big
1742   }
1743 
1744   // Do not do 'split-if' if irreducible loops are present.
1745   if (phase->_has_irreducible_loops) {
1746     return false;
1747   }
1748 
1749   if (merge_point_too_heavy(phase->C, n_ctrl)) {
1750     return false;
1751   }
1752 
1753   // Do not do 'split-if' if some paths are dead.  First do dead code
1754   // elimination and then see if it's still profitable.
1755   for (uint i = 1; i < n_ctrl->req(); i++) {
1756     if (n_ctrl->in(i) == phase->C->top()) {
1757       return false;
1758     }
1759   }
1760 
1761   // If trying to do a 'Split-If' at the loop head, it is only
1762   // profitable if the cmp folds up on BOTH paths.  Otherwise we
1763   // risk peeling a loop forever.
1764 
1765   // CNC - Disabled for now.  Requires careful handling of loop
1766   // body selection for the cloned code.  Also, make sure we check
1767   // for any input path not being in the same loop as n_ctrl.  For
1768   // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1769   // because the alternative loop entry points won't be converted
1770   // into LoopNodes.
1771   IdealLoopTree *n_loop = phase->get_loop(n_ctrl);
1772   for (uint j = 1; j < n_ctrl->req(); j++) {
1773     if (phase->get_loop(n_ctrl->in(j)) != n_loop) {
1774       return false;
1775     }
1776   }
1777 
1778   // Check for safety of the merge point.
1779   if (!merge_point_safe(n_ctrl)) {
1780     return false;
1781   }
1782 
1783   return true;
1784 }
1785 
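     // Fold a heap-stable test that directly follows an identical one: make
     // both tests use the same gc_state load, then replace the second test's
     // condition with a Phi of int constants (1 on region inputs reached from
     // the dominating true projection, 0 on those reached from the false one)
     // so that do_split_if() can push the test through the region and let it
     // constant fold on both paths.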
1786 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1787   assert(is_heap_stable_test(n), "no other tests");
1788   if (identical_backtoback_ifs(n, phase)) {
1789     Node* n_ctrl = n->in(0);
1790     if (can_split_if(phase, n_ctrl)) {
1791       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1792       if (is_heap_stable_test(n)) {
1793         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1794         assert(is_gc_state_load(gc_state_load), "broken");
1795         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1796         assert(is_gc_state_load(dom_gc_state_load), "broken");
1797         if (gc_state_load != dom_gc_state_load) {
1798           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1799         }
1800       }
1801       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1802       Node* proj_true = dom_if->proj_out(1);
1803       Node* proj_false = dom_if->proj_out(0);
1804       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1805       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1806 
1807       for (uint i = 1; i < n_ctrl->req(); i++) {
1808         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1809           bolphi->init_req(i, con_true);
1810         } else {
1811           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1812           bolphi->init_req(i, con_false);
1813         }
1814       }
1815       phase->register_new_node(bolphi, n_ctrl);
1816       phase->igvn().replace_input_of(n, 1, bolphi);
1817       phase->do_split_if(n);
1818     }
1819   }
1820 }
1821 
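     // Walk up the dominator chain from the loop back edge looking for a
     // heap-stable test that does not exit the loop. The test only qualifies
     // when the loop body contains no safepoint (other than leaf calls),
     // presumably because the gc state may change at a safepoint inside the
     // loop.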
1822 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1823   // Find first invariant test that doesn't exit the loop
1824   LoopNode *head = loop->_head->as_Loop();
1825   IfNode* unswitch_iff = NULL;
1826   Node* n = head->in(LoopNode::LoopBackControl);
1827   int loop_has_sfpts = -1;
1828   while (n != head) {
1829     Node* n_dom = phase->idom(n);
1830     if (n->is_Region()) {
1831       if (n_dom->is_If()) {
1832         IfNode* iff = n_dom->as_If();
1833         if (iff->in(1)->is_Bool()) {
1834           BoolNode* bol = iff->in(1)->as_Bool();
1835           if (bol->in(1)->is_Cmp()) {
1836             // If the condition is invariant and not a loop exit,
1837             // then we have found a reason to unswitch.
1838             if (is_heap_stable_test(iff) &&
1839                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1840               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1841               if (loop_has_sfpts == -1) {
1842                 for (uint i = 0; i < loop->_body.size(); i++) {
1843                   Node *m = loop->_body[i];
1844                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1845                     loop_has_sfpts = 1;
1846                     break;
1847                   }
1848                 }
1849                 if (loop_has_sfpts == -1) {
1850                   loop_has_sfpts = 0;
1851                 }
1852               }
1853               if (!loop_has_sfpts) {
1854                 unswitch_iff = iff;
1855               }
1856             }
1857           }
1858         }
1859       }
1860     }
1861     n = n_dom;
1862   }
1863   return unswitch_iff;
1864 }
1865 
1866 
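     // Post-expansion cleanup: walk the whole graph once to collect the
     // heap-stable tests, merge the back-to-back ones, and then, if that did
     // not trigger major progress, try to unswitch innermost reducible loops
     // on their gc-state test (the test is moved out of the loop first, and
     // moved back if the unswitching policy rejects the loop).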
1867 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1868   Node_List heap_stable_tests;
1869   stack.push(phase->C->start(), 0);
1870   do {
1871     Node* n = stack.node();
1872     uint i = stack.index();
1873 
1874     if (i < n->outcnt()) {
1875       Node* u = n->raw_out(i);
1876       stack.set_index(i+1);
1877       if (!visited.test_set(u->_idx)) {
1878         stack.push(u, 0);
1879       }
1880     } else {
1881       stack.pop();
1882       if (n->is_If() && is_heap_stable_test(n)) {
1883         heap_stable_tests.push(n);
1884       }
1885     }
1886   } while (stack.size() > 0);
1887 
1888   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1889     Node* n = heap_stable_tests.at(i);
1890     assert(is_heap_stable_test(n), "only evacuation test");
1891     merge_back_to_back_tests(n, phase);
1892   }
1893 
1894   if (!phase->C->major_progress()) {
1895     VectorSet seen(Thread::current()->resource_area());
1896     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1897       Node* n = heap_stable_tests.at(i);
1898       IdealLoopTree* loop = phase->get_loop(n);
1899       if (loop != phase->ltree_root() &&
1900           loop->_child == NULL &&
1901           !loop->_irreducible) {
1902         Node* head = loop->_head;
1903         if (head->is_Loop() &&
1904             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1905             !seen.test_set(head->_idx)) {
1906           IfNode* iff = find_unswitching_candidate(loop, phase);
1907           if (iff != NULL) {
1908             Node* bol = iff->in(1);
1909             move_gc_state_test_out_of_loop(iff, phase);
1910             if (loop->policy_unswitching(phase)) {
1911               phase->do_unswitching(loop, old_new);
1912             } else {
1913               // Not proceeding with unswitching. Move the load back
1914               // into the loop.
1915               phase->igvn().replace_input_of(iff, 1, bol);
1916             }
1917           }
1918         }
1919       }
1920     }
1921   }
1922 }
1923 
1924 #ifdef ASSERT
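     // Debug-only check of the raw memory graph around the expanded barriers:
     // starting from each lrb slow-path call, flood-fill the control nodes and
     // the raw memory nodes reachable from it and verify that every region
     // reached either has all of its inputs inside the covered control set or
     // carries a raw memory phi whose inputs match the covered inputs of the
     // region, slot by slot.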
1925 void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
1926   const bool trace = false;
1927   ResourceMark rm;
1928   Unique_Node_List nodes;
1929   Unique_Node_List controls;
1930   Unique_Node_List memories;
1931 
1932   nodes.push(root);
1933   for (uint next = 0; next < nodes.size(); next++) {
1934     Node *n  = nodes.at(next);
1935     if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
1936       controls.push(n);
1937       if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
1938       for (uint next2 = 0; next2 < controls.size(); next2++) {
1939         Node *m = controls.at(next2);
1940         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1941           Node* u = m->fast_out(i);
1942           if (u->is_CFG() && !u->is_Root() &&
1943               !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
1944               !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
1945             if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
1946             controls.push(u);
1947           }
1948         }
1949       }
1950       memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
1951       for (uint next2 = 0; next2 < memories.size(); next2++) {
1952         Node *m = memories.at(next2);
1953         assert(m->bottom_type() == Type::MEMORY, "");
1954         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
1955           Node* u = m->fast_out(i);
1956           if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
1957             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1958             memories.push(u);
1959           } else if (u->is_LoadStore()) {
1960             if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
1961             memories.push(u->find_out_with(Op_SCMemProj));
1962           } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
1963             if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1964             memories.push(u);
1965           } else if (u->is_Phi()) {
1966             assert(u->bottom_type() == Type::MEMORY, "");
1967             if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
1968               assert(controls.member(u->in(0)), "");
1969               if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
1970               memories.push(u);
1971             }
1972           } else if (u->is_SafePoint() || u->is_MemBar()) {
1973             for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
1974               Node* uu = u->fast_out(j);
1975               if (uu->bottom_type() == Type::MEMORY) {
1976                 if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
1977                 memories.push(uu);
1978               }
1979             }
1980           }
1981         }
1982       }
1983       for (uint next2 = 0; next2 < controls.size(); next2++) {
1984         Node *m = controls.at(next2);
1985         if (m->is_Region()) {
1986           bool all_in = true;
1987           for (uint i = 1; i < m->req(); i++) {
1988             if (!controls.member(m->in(i))) {
1989               all_in = false;
1990               break;
1991             }
1992           }
1993           if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
1994           bool found_phi = false;
1995           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
1996             Node* u = m->fast_out(j);
1997             if (u->is_Phi() && memories.member(u)) {
1998               found_phi = true;
1999               for (uint i = 1; i < u->req() && found_phi; i++) {
2000                 Node* k = u->in(i);
2001                 if (memories.member(k) != controls.member(m->in(i))) {
2002                   found_phi = false;
2003                 }
2004               }
2005             }
2006           }
2007           assert(found_phi || all_in, "");
2008         }
2009       }
2010       controls.clear();
2011       memories.clear();
2012     }
2013     for (uint i = 0; i < n->len(); ++i) {
2014       Node *m = n->in(i);
2015       if (m != NULL) {
2016         nodes.push(m);
2017       }
2018     }
2019   }
2020 }
2021 #endif
2022 
2023 #ifdef ASSERT
2024 static bool has_never_branch(Node* root) {
2025   for (uint i = 1; i < root->req(); i++) {
2026     Node* in = root->in(i);
2027     if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2028       return true;
2029     }
2030   }
2031   return false;
2032 }
2033 #endif
2034 
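     // Populate _memory_nodes: for every CFG node, the memory state (on the
     // _alias slice) that is live at it. Two passes: first walk the raw memory
     // graph and record, for each non-phi memory node, its controlling CFG
     // node; then run a reverse-post-order fixpoint over the CFG that
     // propagates the state down from the immediate dominator and creates,
     // reuses or discards memory phis at regions where incoming states
     // disagree.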
2035 void MemoryGraphFixer::collect_memory_nodes() {
2036   Node_Stack stack(0);
2037   VectorSet visited(Thread::current()->resource_area());
2038   Node_List regions;
2039 
2040   // Walk the raw memory graph and create a mapping from CFG node to
2041   // memory node. Exclude phis for now.
2042   stack.push(_phase->C->root(), 1);
2043   do {
2044     Node* n = stack.node();
2045     int opc = n->Opcode();
2046     uint i = stack.index();
2047     if (i < n->req()) {
2048       Node* mem = NULL;
2049       if (opc == Op_Root) {
2050         Node* in = n->in(i);
2051         int in_opc = in->Opcode();
2052         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2053           mem = in->in(TypeFunc::Memory);
2054         } else if (in_opc == Op_Halt) {
2055           if (in->in(0)->is_Region()) {
2056             Node* r = in->in(0);
2057             for (uint j = 1; j < r->req(); j++) {
2058               assert(r->in(j)->Opcode() != Op_NeverBranch, "");
2059             }
2060           } else {
2061             Node* proj = in->in(0);
2062             assert(proj->is_Proj(), "");
2063             Node* in = proj->in(0);
2064             assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2065             if (in->is_CallStaticJava()) {
2066               mem = in->in(TypeFunc::Memory);
2067             } else if (in->Opcode() == Op_Catch) {
2068               Node* call = in->in(0)->in(0);
2069               assert(call->is_Call(), "");
2070               mem = call->in(TypeFunc::Memory);
2071             } else if (in->Opcode() == Op_NeverBranch) {
2072               Node* head = in->in(0);
2073               assert(head->is_Region(), "unexpected infinite loop graph shape");
2074 
2075               Node* phi_mem = NULL;
2076               for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2077                 Node* u = head->fast_out(j);
2078                 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2079                   if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2080                     assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2081                     phi_mem = u;
2082                   } else if (u->adr_type() == TypePtr::BOTTOM) {
2083                     assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2084                     if (phi_mem == NULL) {
2085                       phi_mem = u;
2086                     }
2087                   }
2088                 }
2089               }
2090               if (phi_mem == NULL) {
2091                 for (uint j = 1; j < head->req(); j++) {
2092                   Node* tail = head->in(j);
2093                   if (!_phase->is_dominator(head, tail)) {
2094                     continue;
2095                   }
2096                   Node* c = tail;
2097                   while (c != head) {
2098                     if (c->is_SafePoint() && !c->is_CallLeaf()) {
2099                       Node* m = c->in(TypeFunc::Memory);
2100                       if (m->is_MergeMem()) {
2101                         m = m->as_MergeMem()->memory_at(_alias);
2102                       }
2103                       assert(mem == NULL || mem == m, "several memory states");
2104                       mem = m;
2105                     }
2106                     c = _phase->idom(c);
2107                   }
2108                   assert(mem != NULL, "should have found safepoint");
2109                 }
2110                 assert(mem != NULL, "should have found safepoint");
2111               } else {
2112                 mem = phi_mem;
2113               }
2114             }
2115           }
2116         } else {
2117 #ifdef ASSERT
2118           n->dump();
2119           in->dump();
2120 #endif
2121           ShouldNotReachHere();
2122         }
2123       } else {
2124         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2125         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2126         mem = n->in(i);
2127       }
2128       i++;
2129       stack.set_index(i);
2130       if (mem == NULL) {
2131         continue;
2132       }
2133       for (;;) {
2134         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2135           break;
2136         }
2137         if (mem->is_Phi()) {
2138           stack.push(mem, 2);
2139           mem = mem->in(1);
2140         } else if (mem->is_Proj()) {
2141           stack.push(mem, mem->req());
2142           mem = mem->in(0);
2143         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2144           mem = mem->in(TypeFunc::Memory);
2145         } else if (mem->is_MergeMem()) {
2146           MergeMemNode* mm = mem->as_MergeMem();
2147           mem = mm->memory_at(_alias);
2148         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2149           assert(_alias == Compile::AliasIdxRaw, "");
2150           stack.push(mem, mem->req());
2151           mem = mem->in(MemNode::Memory);
2152         } else {
2153 #ifdef ASSERT
2154           mem->dump();
2155 #endif
2156           ShouldNotReachHere();
2157         }
2158       }
2159     } else {
2160       if (n->is_Phi()) {
2161         // Nothing
2162       } else if (!n->is_Root()) {
2163         Node* c = get_ctrl(n);
2164         _memory_nodes.map(c->_idx, n);
2165       }
2166       stack.pop();
2167     }
2168   } while(stack.is_nonempty());
2169 
2170   // Iterate over CFG nodes in rpo and propagate memory state to
2171   // compute memory state at regions, creating new phis if needed.
2172   Node_List rpo_list;
2173   visited.Clear();
2174   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2175   Node* root = rpo_list.pop();
2176   assert(root == _phase->C->root(), "");
2177 
2178   const bool trace = false;
2179 #ifdef ASSERT
2180   if (trace) {
2181     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2182       Node* c = rpo_list.at(i);
2183       if (_memory_nodes[c->_idx] != NULL) {
2184         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2185       }
2186     }
2187   }
2188 #endif
2189   uint last = _phase->C->unique();
2190 
2191 #ifdef ASSERT
2192   uint8_t max_depth = 0;
2193   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2194     IdealLoopTree* lpt = iter.current();
2195     max_depth = MAX2(max_depth, lpt->_nest);
2196   }
2197 #endif
2198 
2199   bool progress = true;
2200   int iteration = 0;
2201   Node_List dead_phis;
2202   while (progress) {
2203     progress = false;
2204     iteration++;
2205     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2206     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2207     IdealLoopTree* last_updated_ilt = NULL;
2208     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2209       Node* c = rpo_list.at(i);
2210 
2211       Node* prev_mem = _memory_nodes[c->_idx];
2212       if (c->is_Region()) {
2213         Node* prev_region = regions[c->_idx];
2214         Node* unique = NULL;
2215         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2216           Node* m = _memory_nodes[c->in(j)->_idx];
2217           assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2218           if (m != NULL) {
2219             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2220               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2221               // continue
2222             } else if (unique == NULL) {
2223               unique = m;
2224             } else if (m == unique) {
2225               // continue
2226             } else {
2227               unique = NodeSentinel;
2228             }
2229           }
2230         }
2231         assert(unique != NULL, "empty phi???");
2232         if (unique != NodeSentinel) {
2233           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
2234             dead_phis.push(prev_region);
2235           }
2236           regions.map(c->_idx, unique);
2237         } else {
2238           Node* phi = NULL;
2239           if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2240             phi = prev_region;
2241             for (uint k = 1; k < c->req(); k++) {
2242               Node* m = _memory_nodes[c->in(k)->_idx];
2243               assert(m != NULL, "expect memory state");
2244               phi->set_req(k, m);
2245             }
2246           } else {
2247             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
2248               Node* u = c->fast_out(j);
2249               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2250                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2251                 phi = u;
2252                 for (uint k = 1; k < c->req() && phi != NULL; k++) {
2253                   Node* m = _memory_nodes[c->in(k)->_idx];
2254                   assert(m != NULL, "expect memory state");
2255                   if (u->in(k) != m) {
2256                     phi = NULL;
2257                   }
2258                 }
2259               }
2260             }
2261             if (phi == NULL) {
2262               phi = new (_phase->C) PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2263               for (uint k = 1; k < c->req(); k++) {
2264                 Node* m = _memory_nodes[c->in(k)->_idx];
2265                 assert(m != NULL, "expect memory state");
2266                 phi->init_req(k, m);
2267               }
2268             }
2269           }
2270           assert(phi != NULL, "");
2271           regions.map(c->_idx, phi);
2272         }
2273         Node* current_region = regions[c->_idx];
2274         if (current_region != prev_region) {
2275           progress = true;
2276           if (prev_region == prev_mem) {
2277             _memory_nodes.map(c->_idx, current_region);
2278           }
2279         }
2280       } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2281         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2282         assert(m != NULL, "expect memory state");
2283         if (m != prev_mem) {
2284           _memory_nodes.map(c->_idx, m);
2285           progress = true;
2286         }
2287       }
2288 #ifdef ASSERT
2289       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2290 #endif
2291     }
2292   }
2293 
2294   // Replace existing phi with computed memory state for that region
2295   // if different (could be a new phi or a dominating memory node if
2296   // that phi was found to be useless).
2297   while (dead_phis.size() > 0) {
2298     Node* n = dead_phis.pop();
2299     n->replace_by(_phase->C->top());
2300     n->destruct();
2301   }
2302   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2303     Node* c = rpo_list.at(i);
2304     if (c->is_Region()) {
2305       Node* n = regions[c->_idx];
2306       if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2307         _phase->register_new_node(n, c);
2308       }
2309     }
2310   }
2311   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2312     Node* c = rpo_list.at(i);
2313     if (c->is_Region()) {
2314       Node* n = regions[c->_idx];
2315       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2316         Node* u = c->fast_out(i);
2317         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2318             u != n) {
2319           if (u->adr_type() == TypePtr::BOTTOM) {
2320             fix_memory_uses(u, n, n, c);
2321           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2322             _phase->lazy_replace(u, n);
2323             --i; --imax;
2324           }
2325         }
2326       }
2327     }
2328   }
2329 }
2330 
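     // Control for a memory node, with one twist: a memory projection of a
     // call that also has an exception memory projection is attributed to the
     // matching catch projection (fall-through or catch-all) rather than to
     // the call itself.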
2331 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2332   Node* c = _phase->get_ctrl(n);
2333   if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
2334     assert(c == n->in(0), "");
2335     CallNode* call = c->as_Call();
2336     CallProjections projs;
2337     call->extract_projections(&projs, true, false);
2338     if (projs.catchall_memproj != NULL) {
2339       if (projs.fallthrough_memproj == n) {
2340         c = projs.fallthrough_catchproj;
2341       } else {
2342         assert(projs.catchall_memproj == n, "");
2343         c = projs.catchall_catchproj;
2344       }
2345     }
2346   }
2347   return c;
2348 }
2349 
2350 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2351   if (_phase->has_ctrl(n))
2352     return get_ctrl(n);
2353   else {
2354     assert (n->is_CFG(), "must be a CFG node");
2355     return n;
2356   }
2357 }
2358 
2359 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2360   return m != NULL && get_ctrl(m) == c;
2361 }
2362 
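     // Memory state to use at ctrl (for the use n, when given): walk up the
     // dominator tree from ctrl until a recorded memory state whose control
     // matches is found, then, if n is supplied, step back through memory
     // nodes attached to the same control until one that actually dominates n
     // is reached.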
2363 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2364   assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
2365   Node* mem = _memory_nodes[ctrl->_idx];
2366   Node* c = ctrl;
2367   while (!mem_is_valid(mem, c) &&
2368          (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2369     c = _phase->idom(c);
2370     mem = _memory_nodes[c->_idx];
2371   }
2372   if (n != NULL && mem_is_valid(mem, c)) {
2373     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2374       mem = next_mem(mem, _alias);
2375     }
2376     if (mem->is_MergeMem()) {
2377       mem = mem->as_MergeMem()->memory_at(_alias);
2378     }
2379     if (!mem_is_valid(mem, c)) {
2380       do {
2381         c = _phase->idom(c);
2382         mem = _memory_nodes[c->_idx];
2383       } while (!mem_is_valid(mem, c) &&
2384                (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2385     }
2386   }
2387   assert(mem->bottom_type() == Type::MEMORY, "");
2388   return mem;
2389 }
2390 
2391 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2392   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2393     Node* use = region->fast_out(i);
2394     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2395         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2396       return true;
2397     }
2398   }
2399   return false;
2400 }
2401 
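     // Hook new_mem, produced at new_ctrl, into the raw memory graph. When
     // mem_for_ctrl hangs off mem through a chain of stores, it is enough to
     // splice new_mem in right below mem; otherwise the CFG below new_ctrl is
     // walked and fresh memory phis are created at regions that now need one.
     // In both cases the uses of mem (loads first, so that anti-dependencies
     // are not missed, then stores, phis, MergeMems and wide-memory users such
     // as calls and safepoints) are rewired to the memory state live at their
     // control.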
2402 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2403   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2404   const bool trace = false;
2405   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2406   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2407   GrowableArray<Node*> phis;
2408   if (mem_for_ctrl != mem) {
2409     Node* old = mem_for_ctrl;
2410     Node* prev = NULL;
2411     while (old != mem) {
2412       prev = old;
2413       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2414         assert(_alias == Compile::AliasIdxRaw, "");
2415         old = old->in(MemNode::Memory);
2416       } else if (old->Opcode() == Op_SCMemProj) {
2417         assert(_alias == Compile::AliasIdxRaw, "");
2418         old = old->in(0);
2419       } else {
2420         ShouldNotReachHere();
2421       }
2422     }
2423     assert(prev != NULL, "");
2424     if (new_ctrl != ctrl) {
2425       _memory_nodes.map(ctrl->_idx, mem);
2426       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2427     }
2428     uint input = (uint)MemNode::Memory;
2429     _phase->igvn().replace_input_of(prev, input, new_mem);
2430   } else {
2431     uses.clear();
2432     _memory_nodes.map(new_ctrl->_idx, new_mem);
2433     uses.push(new_ctrl);
2434     for (uint next = 0; next < uses.size(); next++) {
2435       Node *n = uses.at(next);
2436       assert(n->is_CFG(), "");
2437       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2438       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2439         Node* u = n->fast_out(i);
2440         if (!u->is_Root() && u->is_CFG() && u != n) {
2441           Node* m = _memory_nodes[u->_idx];
2442           if (u->is_Region() &&
2443               !has_mem_phi(u) &&
2444               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2445             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2446             DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
2447 
2448             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2449               bool push = true;
2450               bool create_phi = true;
2451               if (_phase->is_dominator(new_ctrl, u)) {
2452                 create_phi = false;
2453               }
2454               if (create_phi) {
2455                 Node* phi = new (_phase->C) PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2456                 _phase->register_new_node(phi, u);
2457                 phis.push(phi);
2458                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2459                 if (!mem_is_valid(m, u)) {
2460                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2461                   _memory_nodes.map(u->_idx, phi);
2462                 } else {
2463                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2464                   for (;;) {
2465                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2466                     Node* next = NULL;
2467                     if (m->is_Proj()) {
2468                       next = m->in(0);
2469                     } else {
2470                       assert(m->is_Mem() || m->is_LoadStore(), "");
2471                       assert(_alias == Compile::AliasIdxRaw, "");
2472                       next = m->in(MemNode::Memory);
2473                     }
2474                     if (_phase->get_ctrl(next) != u) {
2475                       break;
2476                     }
2477                     if (next->is_MergeMem()) {
2478                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2479                       break;
2480                     }
2481                     if (next->is_Phi()) {
2482                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2483                       break;
2484                     }
2485                     m = next;
2486                   }
2487 
2488                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2489                   assert(m->is_Mem() || m->is_LoadStore(), "");
2490                   uint input = (uint)MemNode::Memory;
2491                   _phase->igvn().replace_input_of(m, input, phi);
2492                   push = false;
2493                 }
2494               } else {
2495                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2496               }
2497               if (push) {
2498                 uses.push(u);
2499               }
2500             }
2501           } else if (!mem_is_valid(m, u) &&
2502                      !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
2503             uses.push(u);
2504           }
2505         }
2506       }
2507     }
2508     for (int i = 0; i < phis.length(); i++) {
2509       Node* n = phis.at(i);
2510       Node* r = n->in(0);
2511       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2512       for (uint j = 1; j < n->req(); j++) {
2513         Node* m = find_mem(r->in(j), NULL);
2514         _phase->igvn().replace_input_of(n, j, m);
2515         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2516       }
2517     }
2518   }
2519   uint last = _phase->C->unique();
2520   MergeMemNode* mm = NULL;
2521   int alias = _alias;
2522   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2523   // Process loads first to not miss an anti-dependency: if the memory
2524   // edge of a store is updated before a load is processed then an
2525   // anti-dependency may be missed.
2526   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2527     Node* u = mem->out(i);
2528     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2529       Node* m = find_mem(_phase->get_ctrl(u), u);
2530       if (m != mem) {
2531         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2532         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2533         --i;
2534       }
2535     }
2536   }
2537   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2538     Node* u = mem->out(i);
2539     if (u->_idx < last) {
2540       if (u->is_Mem()) {
2541         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2542           Node* m = find_mem(_phase->get_ctrl(u), u);
2543           if (m != mem) {
2544             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2545             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2546             --i;
2547           }
2548         }
2549       } else if (u->is_MergeMem()) {
2550         MergeMemNode* u_mm = u->as_MergeMem();
2551         if (u_mm->memory_at(alias) == mem) {
2552           MergeMemNode* newmm = NULL;
2553           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2554             Node* uu = u->fast_out(j);
2555             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2556             if (uu->is_Phi()) {
2557               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2558               Node* region = uu->in(0);
2559               int nb = 0;
2560               for (uint k = 1; k < uu->req(); k++) {
2561                 if (uu->in(k) == u) {
2562                   Node* m = find_mem(region->in(k), NULL);
2563                   if (m != mem) {
2564                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2565                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2566                     if (newmm != u) {
2567                       _phase->igvn().replace_input_of(uu, k, newmm);
2568                       nb++;
2569                       --jmax;
2570                     }
2571                   }
2572                 }
2573               }
2574               if (nb > 0) {
2575                 --j;
2576               }
2577             } else {
2578               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2579               if (m != mem) {
2580                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2581                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2582                 if (newmm != u) {
2583                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2584                   --j; --jmax;
2585                 }
2586               }
2587             }
2588           }
2589         }
2590       } else if (u->is_Phi()) {
2591         assert(u->bottom_type() == Type::MEMORY, "what else?");
2592         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2593           Node* region = u->in(0);
2594           bool replaced = false;
2595           for (uint j = 1; j < u->req(); j++) {
2596             if (u->in(j) == mem) {
2597               Node* m = find_mem(region->in(j), NULL);
2598               Node* nnew = m;
2599               if (m != mem) {
2600                 if (u->adr_type() == TypePtr::BOTTOM) {
2601                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2602                   nnew = mm;
2603                 }
2604                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2605                 _phase->igvn().replace_input_of(u, j, nnew);
2606                 replaced = true;
2607               }
2608             }
2609           }
2610           if (replaced) {
2611             --i;
2612           }
2613         }
2614       } else if ((u->adr_type() == TypePtr::BOTTOM) ||
2615                  u->adr_type() == NULL) {
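              // Users of the whole memory state (returns, safepoints, calls, ...): they
              // keep the old base memory but get a MergeMem whose slice for our alias
              // is the memory state current at their control.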
2616         assert(u->adr_type() != NULL ||
2617                u->Opcode() == Op_Rethrow ||
2618                u->Opcode() == Op_Return ||
2619                u->Opcode() == Op_SafePoint ||
2620                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2621                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2622                u->Opcode() == Op_CallLeaf, "");
2623         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2624         if (m != mem) {
2625           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2626           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2627           --i;
2628         }
2629       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2630         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2631         if (m != mem) {
2632           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2633           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2634           --i;
2635         }
2636       } else if (u->adr_type() != TypePtr::BOTTOM &&
2637                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2638         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2639         assert(m != mem, "");
2640         // u is on the wrong slice...
2641         assert(u->is_ClearArray(), "");
2642         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2643         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2644         --i;
2645       }
2646     }
2647   }
2648 #ifdef ASSERT
2649   assert(new_mem->outcnt() > 0, "");
2650   for (int i = 0; i < phis.length(); i++) {
2651     Node* n = phis.at(i);
2652     assert(n->outcnt() > 0, "new phi must have uses now");
2653   }
2654 #endif
2655 }
2656 
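     // Helper: wrap the old memory state 'mem' in a new MergeMem whose slice for
     // _alias is replaced by 'rep_proj', registered at 'rep_ctrl'.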
2657 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2658   MergeMemNode* mm = MergeMemNode::make(_phase->C, mem);
2659   mm->set_memory_at(_alias, rep_proj);
2660   _phase->register_new_node(mm, rep_ctrl);
2661   return mm;
2662 }
2663 
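     // Helper: redirect the _alias slice of MergeMem 'u' from 'mem' to 'rep_proj'.
     // If 'u' has a single use it is updated in place; otherwise a copy is made so
     // that the other uses of 'u' keep seeing the old memory state. The caller's
     // DUIterator 'i' is adjusted when an edge of 'u' itself is replaced.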
2664 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2665   MergeMemNode* newmm = NULL;
2666   MergeMemNode* u_mm = u->as_MergeMem();
2667   Node* c = _phase->get_ctrl(u);
2668   if (_phase->is_dominator(c, rep_ctrl)) {
2669     c = rep_ctrl;
2670   } else {
2671     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2672   }
2673   if (u->outcnt() == 1) {
2674     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2675       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2676       --i;
2677     } else {
2678       _phase->igvn().rehash_node_delayed(u);
2679       u_mm->set_memory_at(_alias, rep_proj);
2680     }
2681     newmm = u_mm;
2682     _phase->set_ctrl_and_loop(u, c);
2683   } else {
2684     // We can't simply clone u and then change one of its inputs, because
2685     // that would add and then remove an edge, which messes with the
2686     // DUIterator.
2687     newmm = MergeMemNode::make(_phase->C, u_mm->base_memory());
2688     for (uint j = 0; j < u->req(); j++) {
2689       if (j < newmm->req()) {
2690         if (j == (uint)_alias) {
2691           newmm->set_req(j, rep_proj);
2692         } else if (newmm->in(j) != u->in(j)) {
2693           newmm->set_req(j, u->in(j));
2694         }
2695       } else if (j == (uint)_alias) {
2696         newmm->add_req(rep_proj);
2697       } else {
2698         newmm->add_req(u->in(j));
2699       }
2700     }
2701     if ((uint)_alias >= u->req()) {
2702       newmm->set_memory_at(_alias, rep_proj);
2703     }
2704     _phase->register_new_node(newmm, c);
2705   }
2706   return newmm;
2707 }
2708 
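     // A bottom-memory Phi only stands in for our alias slice if its region does
     // not also merge a narrow memory Phi for that slice; otherwise the narrow Phi
     // is the one that gets fixed up.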
2709 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2710   if (phi->adr_type() == TypePtr::BOTTOM) {
2711     Node* region = phi->in(0);
2712     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2713       Node* uu = region->fast_out(j);
2714       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2715         return false;
2716       }
2717     }
2718     return true;
2719   }
2720   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2721 }
2722 
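     // Rewire users of the old memory state 'mem' that are dominated by 'rep_ctrl'
     // (checked per control path for Phis) so that they consume 'rep_proj' instead,
     // going through a MergeMem for users of the whole memory state. 'replacement'
     // itself is skipped.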
2723 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2724   uint last = _phase->C->unique();
2725   MergeMemNode* mm = NULL;
2726   assert(mem->bottom_type() == Type::MEMORY, "");
2727   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2728     Node* u = mem->out(i);
2729     if (u != replacement && u->_idx < last) {
2730       if (u->is_MergeMem()) {
2731         MergeMemNode* u_mm = u->as_MergeMem();
2732         if (u_mm->memory_at(_alias) == mem) {
2733           MergeMemNode* newmm = NULL;
2734           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2735             Node* uu = u->fast_out(j);
2736             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2737             if (uu->is_Phi()) {
2738               if (should_process_phi(uu)) {
2739                 Node* region = uu->in(0);
2740                 int nb = 0;
2741                 for (uint k = 1; k < uu->req(); k++) {
2742                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2743                     if (newmm == NULL) {
2744                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2745                     }
2746                     if (newmm != u) {
2747                       _phase->igvn().replace_input_of(uu, k, newmm);
2748                       nb++;
2749                       --jmax;
2750                     }
2751                   }
2752                 }
2753                 if (nb > 0) {
2754                   --j;
2755                 }
2756               }
2757             } else {
2758               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2759                 if (newmm == NULL) {
2760                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2761                 }
2762                 if (newmm != u) {
2763                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2764                   --j, --jmax;
2765                 }
2766               }
2767             }
2768           }
2769         }
2770       } else if (u->is_Phi()) {
2771         assert(u->bottom_type() == Type::MEMORY, "what else?");
2772         Node* region = u->in(0);
2773         if (should_process_phi(u)) {
2774           bool replaced = false;
2775           for (uint j = 1; j < u->req(); j++) {
2776             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2777               Node* nnew = rep_proj;
2778               if (u->adr_type() == TypePtr::BOTTOM) {
2779                 if (mm == NULL) {
2780                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2781                 }
2782                 nnew = mm;
2783               }
2784               _phase->igvn().replace_input_of(u, j, nnew);
2785               replaced = true;
2786             }
2787           }
2788           if (replaced) {
2789             --i;
2790           }
2791 
2792         }
2793       } else if ((u->adr_type() == TypePtr::BOTTOM) ||
2794                  u->adr_type() == NULL) {
2795         assert(u->adr_type() != NULL ||
2796                u->Opcode() == Op_Rethrow ||
2797                u->Opcode() == Op_Return ||
2798                u->Opcode() == Op_SafePoint ||
2799                u->Opcode() == Op_StoreIConditional ||
2800                u->Opcode() == Op_StoreLConditional ||
2801                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2802                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2803                u->Opcode() == Op_CallLeaf, err_msg("%s", u->Name()));
2804         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2805           if (mm == NULL) {
2806             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2807           }
2808           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2809           --i;
2810         }
2811       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2812         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2813           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2814           --i;
2815         }
2816       }
2817     }
2818   }
2819 }
2820 
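     // The load reference barrier takes a loaded oop and produces the up-to-date
     // (possibly forwarded) copy of that oop. Each barrier is recorded on the
     // compilation so it can be expanded later.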
2821 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
2822 : Node(ctrl, obj) {
2823   Compile::current()->add_shenandoah_barrier(this);
2824 }
2825 
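     // Type functions: the barrier returns the same reference it was given, just
     // possibly forwarded, so its type simply follows the input value.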
2826 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2827   if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
2828     return Type::TOP;
2829   }
2830   const Type* t = in(ValueIn)->bottom_type();
2831   if (t == TypePtr::NULL_PTR) {
2832     return t;
2833   }
2834   return t->is_oopptr();
2835 }
2836 
2837 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseTransform *phase) const {
2838   // Either input is TOP ==> the result is TOP
2839   const Type *t2 = phase->type(in(ValueIn));
2840   if (t2 == Type::TOP) return Type::TOP;
2841 
2842   if (t2 == TypePtr::NULL_PTR) {
2843     return t2;
2844   }
2845 
2846   const Type* type = t2->is_oopptr();
2847   return type;
2848 }
2849 
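     // The barrier folds away when its input provably never needs one (nulls,
     // constants, fresh allocations, values already produced by another barrier, ...),
     // as determined by needs_barrier().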
2850 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseTransform *phase) {
2851   Node* value = in(ValueIn);
2852   if (!needs_barrier(phase, value)) {
2853     return value;
2854   }
2855   return this;
2856 }
2857 
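     // Walk the inputs of 'n' (through Phis, casts, projections and CMoves) and
     // decide whether the value can possibly reference an object that still needs
     // forwarding. Conservatively answers true for anything unrecognized.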
2858 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseTransform* phase, Node* n) {
2859   Unique_Node_List visited;
2860   return needs_barrier_impl(phase, n, visited);
2861 }
2862 
2863 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseTransform* phase, Node* n, Unique_Node_List &visited) {
2864   if (n == NULL) return false;
2865   if (visited.member(n)) {
2866     return false; // Been there.
2867   }
2868   visited.push(n);
2869 
2870   if (n->is_Allocate()) {
2871     // tty->print_cr("optimize barrier on alloc");
2872     return false;
2873   }
2874   if (n->is_Call()) {
2875     // tty->print_cr("optimize barrier on call");
2876     return false;
2877   }
2878 
2879   const Type* type = phase->type(n);
2880   if (type == Type::TOP) {
2881     return false;
2882   }
2883   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2884     // tty->print_cr("optimize barrier on null");
2885     return false;
2886   }
2887   // Implementation detail: in 8u we need to check isa_oopptr()/isa_narrowoop() before calling make_oopptr()
2888   // on potentially non-oop types, otherwise make_oopptr() would assert. make_oopptr() was fixed later by JDK-8078629.
2889   if ((type->isa_oopptr() || type->isa_narrowoop()) && type->make_oopptr()->const_oop() != NULL) {
2890     // tty->print_cr("optimize barrier on constant");
2891     return false;
2892   }
2893 
2894   switch (n->Opcode()) {
2895     case Op_AddP:
2896       return true; // TODO: Can refine?
2897     case Op_LoadP:
2898     case Op_GetAndSetN:
2899     case Op_GetAndSetP:
2900       return true;
2901     case Op_Phi: {
2902       for (uint i = 1; i < n->req(); i++) {
2903         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2904       }
2905       return false;
2906     }
2907     case Op_CheckCastPP:
2908     case Op_CastPP:
2909       return needs_barrier_impl(phase, n->in(1), visited);
2910     case Op_Proj:
2911       return needs_barrier_impl(phase, n->in(0), visited);
2912     case Op_ShenandoahLoadReferenceBarrier:
2913       // tty->print_cr("optimize barrier on barrier");
2914       return false;
2915     case Op_Parm:
2916       // tty->print_cr("optimize barrier on input arg");
2917       return false;
2918     case Op_DecodeN:
2919     case Op_EncodeP:
2920       return needs_barrier_impl(phase, n->in(1), visited);
2921     case Op_LoadN:
2922       return true;
2923     case Op_CMoveN:
2924     case Op_CMoveP:
2925       return needs_barrier_impl(phase, n->in(2), visited) ||
2926              needs_barrier_impl(phase, n->in(3), visited);
2927     case Op_CreateEx:
2928       return false;
2929     default:
2930       break;
2931   }
2932 #ifdef ASSERT
2933   tty->print("need barrier on?: ");
2934   tty->print_cr("ins:");
2935   n->dump(2);
2936   tty->print_cr("outs:");
2937   n->dump(-2);
2938   ShouldNotReachHere();
2939 #endif
2940   return true;
2941 }
2942 
2943 bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
2944   Unique_Node_List visited;
2945   Node_Stack stack(0);
2946   stack.push(this, 0);
2947 
2948   // Check if the barrier is actually useful: go over its uses looking for ones
2949   // that require a barrier (e.g. memory accesses). Stop once we detect a required
2950   // use. Otherwise, walk until we run out of nodes and then declare the barrier redundant.
2951   while (stack.size() > 0) {
2952     Node* n = stack.node();
2953     if (visited.member(n)) {
2954       stack.pop();
2955       continue;
2956     }
2957     visited.push(n);
2958     bool visit_users = false;
2959     switch (n->Opcode()) {
2960       case Op_CallStaticJava:
2961       case Op_CallDynamicJava:
2962       case Op_CallLeaf:
2963       case Op_CallLeafNoFP:
2964       case Op_CompareAndSwapL:
2965       case Op_CompareAndSwapI:
2966       case Op_CompareAndSwapN:
2967       case Op_CompareAndSwapP:
2968       case Op_ShenandoahCompareAndSwapN:
2969       case Op_ShenandoahCompareAndSwapP:
2970       case Op_GetAndSetL:
2971       case Op_GetAndSetI:
2972       case Op_GetAndSetP:
2973       case Op_GetAndSetN:
2974       case Op_GetAndAddL:
2975       case Op_GetAndAddI:
2976       case Op_FastLock:
2977       case Op_FastUnlock:
2978       case Op_Rethrow:
2979       case Op_Return:
2980       case Op_StoreB:
2981       case Op_StoreC:
2982       case Op_StoreD:
2983       case Op_StoreF:
2984       case Op_StoreL:
2985       case Op_StoreLConditional:
2986       case Op_StoreI:
2987       case Op_StoreIConditional:
2988       case Op_StoreN:
2989       case Op_StoreP:
2990       case Op_StoreVector:
2991       case Op_EncodeP:
2992       case Op_CastP2X:
2993       case Op_SafePoint:
2994       case Op_EncodeISOArray:
2995       case Op_AryEq:
2996       case Op_StrEquals:
2997       case Op_StrComp:
2998       case Op_StrIndexOf:
2999         // Known to require barriers
3000         return false;
3001       case Op_CmpP: {
3002         if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) ||
3003             n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
3004           // One of the sides is known null, no need for barrier.
3005         } else {
3006           return false;
3007         }
3008         break;
3009       }
3010       case Op_LoadB:
3011       case Op_LoadUB:
3012       case Op_LoadUS:
3013       case Op_LoadD:
3014       case Op_LoadF:
3015       case Op_LoadL:
3016       case Op_LoadI:
3017       case Op_LoadS:
3018       case Op_LoadN:
3019       case Op_LoadP:
3020       case Op_LoadVector: {
3021         const TypePtr* adr_type = n->adr_type();
3022         int alias_idx = Compile::current()->get_alias_index(adr_type);
3023         Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
3024         ciField* field = alias_type->field();
3025         bool is_static = field != NULL && field->is_static();
3026         bool is_final = field != NULL && field->is_final();
3027 
3028         if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
3029           // Loading the constant does not require barriers: it should be handled
3030           // as part of GC roots already.
3031         } else {
3032           return false;
3033         }
3034         break;
3035       }
3036       case Op_Conv2B:
3037       case Op_LoadRange:
3038       case Op_LoadKlass:
3039       case Op_LoadNKlass:
3040         // Do not require barriers
3041         break;
3042       case Op_AddP:
3043       case Op_CheckCastPP:
3044       case Op_CastPP:
3045       case Op_CMoveP:
3046       case Op_Phi:
3047       case Op_ShenandoahLoadReferenceBarrier:
3048         // Whether or not these need the barriers depends on their users
3049         visit_users = true;
3050         break;
3051       default: {
3052 #ifdef ASSERT
3053         fatal(err_msg("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()]));
3054 #else
3055         // Default to have excess barriers, rather than miss some.
3056         return false;
3057 #endif
3058       }
3059     }
3060 
3061     stack.pop();
3062     if (visit_users) {
3063       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3064         Node* user = n->fast_out(i);
3065         if (user != NULL) {
3066           stack.push(user, 0);
3067         }
3068       }
3069     }
3070   }
3071 
3072   // No use requiring a barrier was found.
3073   return true;
3074 }
3075 
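     // Recognize the pattern where the barrier's input is a CastPP pinned under an
     // explicit null check whose failing path is an uncommon trap, and return that
     // uncommon trap call; NULL if the input does not have that shape.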
3076 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
3077   Node* val = in(ValueIn);
3078 
3079   const Type* val_t = igvn.type(val);
3080 
3081   if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
3082       val->Opcode() == Op_CastPP &&
3083       val->in(0) != NULL &&
3084       val->in(0)->Opcode() == Op_IfTrue &&
3085       val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
3086       val->in(0)->in(0)->is_If() &&
3087       val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
3088       val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
3089       val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
3090       val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
3091       val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
3092     assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
3093     CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
3094     return unc;
3095   }
3096   return NULL;
3097 }