1 /*
   2  * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  30 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  32 #include "gc/shenandoah/shenandoahForwarding.hpp"
  33 #include "gc/shenandoah/shenandoahHeap.hpp"
  34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  35 #include "gc/shenandoah/shenandoahRuntime.hpp"
  36 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/callnode.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 
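     // Expand all IU barriers and load reference barriers recorded during parsing.
     // Expansion runs as a dedicated loop-opts pass (LoopOptsShenandoahExpand) and,
     // if ShenandoahLoopOptsAfterExpansion is set, another round of loop opts follows.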
  47 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  48   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  49   if ((state->iu_barriers_count() +
  50        state->load_reference_barriers_count()) > 0) {
  51     assert(C->post_loop_opts_phase(), "no loop opts allowed");
  52     C->reset_post_loop_opts_phase(); // ... but we know what we are doing
  53     bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
  54     C->clear_major_progress();
  55     PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
  56     if (C->failing()) return false;
  57     PhaseIdealLoop::verify(igvn);
  58     if (attempt_more_loopopts) {
  59       C->set_major_progress();
  60       if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  61         return false;
  62       }
  63       C->clear_major_progress();
  64 
  65       C->process_for_post_loop_opts_igvn(igvn);
  66       if (C->failing()) return false;
  67     }
  68     C->set_post_loop_opts_phase(); // now for real!
  69   }
  70   return true;
  71 }
  72 
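     // Match the control-flow shape emitted by test_gc_state(): an If on
     // ((gc_state & mask) != 0), where gc_state is the thread-local byte.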
  73 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  74   if (!UseShenandoahGC) {
  75     return false;
  76   }
  77   assert(iff->is_If(), "bad input");
  78   if (iff->Opcode() != Op_If) {
  79     return false;
  80   }
  81   Node* bol = iff->in(1);
  82   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  83     return false;
  84   }
  85   Node* cmp = bol->in(1);
  86   if (cmp->Opcode() != Op_CmpI) {
  87     return false;
  88   }
  89   Node* in1 = cmp->in(1);
  90   Node* in2 = cmp->in(2);
  91   if (in2->find_int_con(-1) != 0) {
  92     return false;
  93   }
  94   if (in1->Opcode() != Op_AndI) {
  95     return false;
  96   }
  97   in2 = in1->in(2);
  98   if (in2->find_int_con(-1) != mask) {
  99     return false;
 100   }
 101   in1 = in1->in(1);
 102 
 103   return is_gc_state_load(in1);
 104 }
 105 
 106 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 107   return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 108 }
 109 
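     // Match a LoadB/LoadUB of the thread-local gc_state byte, i.e. a load
     // through AddP(ThreadLocal, gc_state_offset).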
 110 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 111   if (!UseShenandoahGC) {
 112     return false;
 113   }
 114   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 115     return false;
 116   }
 117   Node* addp = n->in(MemNode::Address);
 118   if (!addp->is_AddP()) {
 119     return false;
 120   }
 121   Node* base = addp->in(AddPNode::Address);
 122   Node* off = addp->in(AddPNode::Offset);
 123   if (base->Opcode() != Op_ThreadLocal) {
 124     return false;
 125   }
 126   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 127     return false;
 128   }
 129   return true;
 130 }
 131 
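     // Walk control up from start towards the dominating stop node and report
     // whether any path may go through a (non leaf-call) safepoint.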
 132 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 133   assert(phase->is_dominator(stop, start), "bad inputs");
 134   ResourceMark rm;
 135   Unique_Node_List wq;
 136   wq.push(start);
 137   for (uint next = 0; next < wq.size(); next++) {
 138     Node *m = wq.at(next);
 139     if (m == stop) {
 140       continue;
 141     }
 142     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 143       return true;
 144     }
 145     if (m->is_Region()) {
 146       for (uint i = 1; i < m->req(); i++) {
 147         wq.push(m->in(i));
 148       }
 149     } else {
 150       wq.push(m->in(0));
 151     }
 152   }
 153   return false;
 154 }
 155 
 156 #ifdef ASSERT
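     // Follow the value 'in' through casts, AddPs, phis and CMoves and check that
     // every definition reaching it either carries a barrier or provably needs none
     // (constants, arguments, newly allocated objects, ...).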
 157 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 158   assert(phis.size() == 0, "");
 159 
 160   while (true) {
 161     if (in->bottom_type() == TypePtr::NULL_PTR) {
 162       if (trace) {tty->print_cr("null");}
 163     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 164       if (trace) {tty->print_cr("Non oop");}
 165     } else {
 166       if (in->is_ConstraintCast()) {
 167         in = in->in(1);
 168         continue;
 169       } else if (in->is_AddP()) {
 170         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 171         in = in->in(AddPNode::Address);
 172         continue;
 173       } else if (in->is_Con()) {
 174         if (trace) {
 175           tty->print("Found constant");
 176           in->dump();
 177         }
 178       } else if (in->Opcode() == Op_Parm) {
 179         if (trace) {
 180           tty->print("Found argument");
 181         }
 182       } else if (in->Opcode() == Op_CreateEx) {
 183         if (trace) {
 184           tty->print("Found create-exception");
 185         }
 186       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 187         if (trace) {
 188           tty->print("Found raw LoadP (OSR argument?)");
 189         }
 190       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 191         if (t == ShenandoahOopStore) {
 192           uint i = 0;
 193           for (; i < phis.size(); i++) {
 194             Node* n = phis.node_at(i);
 195             if (n->Opcode() == Op_ShenandoahIUBarrier) {
 196               break;
 197             }
 198           }
 199           if (i == phis.size()) {
 200             return false;
 201           }
 202         }
 203         barriers_used.push(in);
 204         if (trace) {tty->print("Found barrier"); in->dump();}
 205       } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
 206         if (t != ShenandoahOopStore) {
 207           in = in->in(1);
 208           continue;
 209         }
 210         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 211         phis.push(in, in->req());
 212         in = in->in(1);
 213         continue;
 214       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 215         if (trace) {
 216           tty->print("Found alloc");
 217           in->in(0)->dump();
 218         }
 219       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 220         if (trace) {
 221           tty->print("Found Java call");
 222         }
 223       } else if (in->is_Phi()) {
 224         if (!visited.test_set(in->_idx)) {
 225           if (trace) {tty->print("Pushed phi:"); in->dump();}
 226           phis.push(in, 2);
 227           in = in->in(1);
 228           continue;
 229         }
 230         if (trace) {tty->print("Already seen phi:"); in->dump();}
 231       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 232         if (!visited.test_set(in->_idx)) {
 233           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 234           phis.push(in, CMoveNode::IfTrue);
 235           in = in->in(CMoveNode::IfFalse);
 236           continue;
 237         }
 238         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 239       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 240         in = in->in(1);
 241         continue;
 242       } else {
 243         return false;
 244       }
 245     }
 246     bool cont = false;
 247     while (phis.is_nonempty()) {
 248       uint idx = phis.index();
 249       Node* phi = phis.node();
 250       if (idx >= phi->req()) {
 251         if (trace) {tty->print("Popped phi:"); phi->dump();}
 252         phis.pop();
 253         continue;
 254       }
 255       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 256       in = phi->in(idx);
 257       phis.set_index(idx+1);
 258       cont = true;
 259       break;
 260     }
 261     if (!cont) {
 262       break;
 263     }
 264   }
 265   return true;
 266 }
 267 
 268 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 269   if (n1 != nullptr) {
 270     n1->dump(+10);
 271   }
 272   if (n2 != nullptr) {
 273     n2->dump(+10);
 274   }
 275   fatal("%s", msg);
 276 }
 277 
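     // Graph-wide verification: walk everything reachable from the root and check
     // that oop loads, stores, compares, atomics and runtime/intrinsic call
     // arguments only see properly barriered values.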
 278 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 279   ResourceMark rm;
 280   Unique_Node_List wq;
 281   GrowableArray<Node*> barriers;
 282   Unique_Node_List barriers_used;
 283   Node_Stack phis(0);
 284   VectorSet visited;
 285   const bool trace = false;
 286   const bool verify_no_useless_barrier = false;
 287 
 288   wq.push(root);
 289   for (uint next = 0; next < wq.size(); next++) {
 290     Node *n = wq.at(next);
 291     if (n->is_Load()) {
 292       const bool trace = false;
 293       if (trace) {tty->print("Verifying"); n->dump();}
 294       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 295         if (trace) {tty->print_cr("Load range/klass");}
 296       } else {
 297         const TypePtr* adr_type = n->as_Load()->adr_type();
 298 
 299         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 300           if (trace) {tty->print_cr("Mark load");}
 301         } else if (adr_type->isa_instptr() &&
 302                    adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 303                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
 304           if (trace) {tty->print_cr("Reference.get()");}
 305         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 306           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 307         }
 308       }
 309     } else if (n->is_Store()) {
 310       const bool trace = false;
 311 
 312       if (trace) {tty->print("Verifying"); n->dump();}
 313       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 314         Node* adr = n->in(MemNode::Address);
 315         bool verify = true;
 316 
 317         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 318           adr = adr->in(AddPNode::Address);
 319           if (adr->is_AddP()) {
 320             assert(adr->in(AddPNode::Base)->is_top(), "");
 321             adr = adr->in(AddPNode::Address);
 322             if (adr->Opcode() == Op_LoadP &&
 323                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 324                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 325                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 326               if (trace) {tty->print_cr("SATB prebarrier");}
 327               verify = false;
 328             }
 329           }
 330         }
 331 
 332         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 333           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 334         }
 335       }
 336       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 337         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 338       }
 339     } else if (n->Opcode() == Op_CmpP) {
 340       const bool trace = false;
 341 
 342       Node* in1 = n->in(1);
 343       Node* in2 = n->in(2);
 344       if (in1->bottom_type()->isa_oopptr()) {
 345         if (trace) {tty->print("Verifying"); n->dump();}
 346 
 347         bool mark_inputs = false;
 348         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 349             (in1->is_Con() || in2->is_Con())) {
 350           if (trace) {tty->print_cr("Comparison against a constant");}
 351           mark_inputs = true;
 352         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 353                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 354           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 355           mark_inputs = true;
 356         } else {
 357           assert(in2->bottom_type()->isa_oopptr(), "");
 358 
 359           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 360               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 361             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 362           }
 363         }
 364         if (verify_no_useless_barrier &&
 365             mark_inputs &&
 366             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 367              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 368           phis.clear();
 369           visited.reset();
 370         }
 371       }
 372     } else if (n->is_LoadStore()) {
 373       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 374           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 375         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 376       }
 377 
 378       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 379         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 380       }
 381     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 382       CallNode* call = n->as_Call();
 383 
 384       static struct {
 385         const char* name;
 386         struct {
 387           int pos;
 388           verify_type t;
 389         } args[6];
 390       } calls[] = {
 391         "array_partition_stub",
 392         { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore },   { -1, ShenandoahNone },
 393           { -1, ShenandoahNone },                { -1, ShenandoahNone },                  { -1, ShenandoahNone } },
 394         "arraysort_stub",
 395         { { TypeFunc::Parms, ShenandoahStore },  { -1, ShenandoahNone },                  { -1, ShenandoahNone },
 396           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 397         "aescrypt_encryptBlock",
 398         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 399           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 400         "aescrypt_decryptBlock",
 401         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 402           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 403         "multiplyToLen",
 404         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 405           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 406         "squareToLen",
 407         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 408           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 409         "montgomery_multiply",
 410         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 411           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 412         "montgomery_square",
 413         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 414           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 415         "mulAdd",
 416         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 417           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 418         "vectorizedMismatch",
 419         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 420           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 421         "updateBytesCRC32",
 422         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 423           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 424         "updateBytesAdler32",
 425         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 426           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 427         "updateBytesCRC32C",
 428         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 429           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 430         "counterMode_AESCrypt",
 431         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 432           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 433         "cipherBlockChaining_encryptAESCrypt",
 434         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 435           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 436         "cipherBlockChaining_decryptAESCrypt",
 437         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 438           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 439         "shenandoah_clone_barrier",
 440         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 441           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 442         "ghash_processBlocks",
 443         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 444           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 445         "sha1_implCompress",
 446         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 447           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 448         "sha256_implCompress",
 449         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 450           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 451         "sha512_implCompress",
 452         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 453           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 454         "sha1_implCompressMB",
 455         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 456           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 457         "sha256_implCompressMB",
 458         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 459           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 460         "sha512_implCompressMB",
 461         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 462           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 463         "encodeBlock",
 464         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 465           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 466         "decodeBlock",
 467         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 468           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 469       };
 470 
 471       if (call->is_call_to_arraycopystub()) {
 472         Node* dest = nullptr;
 473         const TypeTuple* args = n->as_Call()->_tf->domain();
 474         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 475           if (args->field_at(i)->isa_ptr()) {
 476             j++;
 477             if (j == 2) {
 478               dest = n->in(i);
 479               break;
 480             }
 481           }
 482         }
 483         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 484             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 485           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 486         }
 487       } else if (strlen(call->_name) > 5 &&
 488                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 489         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 490           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 491         }
 492       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 493         // skip
 494       } else {
 495         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 496         int i = 0;
 497         for (; i < calls_len; i++) {
 498           if (!strcmp(calls[i].name, call->_name)) {
 499             break;
 500           }
 501         }
 502         if (i != calls_len) {
 503           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 504           for (uint j = 0; j < args_len; j++) {
 505             int pos = calls[i].args[j].pos;
 506             if (pos == -1) {
 507               break;
 508             }
 509             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 510               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 511             }
 512           }
 513           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 514             if (call->in(j)->bottom_type()->make_ptr() &&
 515                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 516               uint k = 0;
 517               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 518               if (k == args_len) {
 519                 fatal("arg %d for call %s not covered", j, call->_name);
 520               }
 521             }
 522           }
 523         } else {
 524           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 525             if (call->in(j)->bottom_type()->make_ptr() &&
 526                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 527               fatal("%s not covered", call->_name);
 528             }
 529           }
 530         }
 531       }
 532     } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 533       // skip
 534     } else if (n->is_AddP()
 535                || n->is_Phi()
 536                || n->is_ConstraintCast()
 537                || n->Opcode() == Op_Return
 538                || n->Opcode() == Op_CMoveP
 539                || n->Opcode() == Op_CMoveN
 540                || n->Opcode() == Op_Rethrow
 541                || n->is_MemBar()
 542                || n->Opcode() == Op_Conv2B
 543                || n->Opcode() == Op_SafePoint
 544                || n->is_CallJava()
 545                || n->Opcode() == Op_Unlock
 546                || n->Opcode() == Op_EncodeP
 547                || n->Opcode() == Op_DecodeN) {
 548       // nothing to do
 549     } else {
 550       static struct {
 551         int opcode;
 552         struct {
 553           int pos;
 554           verify_type t;
 555         } inputs[2];
 556       } others[] = {
 557         Op_FastLock,
 558         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 559         Op_Lock,
 560         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 561         Op_ArrayCopy,
 562         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 563         Op_StrCompressedCopy,
 564         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 565         Op_StrInflatedCopy,
 566         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 567         Op_AryEq,
 568         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 569         Op_StrIndexOf,
 570         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 571         Op_StrComp,
 572         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 573         Op_StrEquals,
 574         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 575         Op_VectorizedHashCode,
 576         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 577         Op_EncodeISOArray,
 578         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 579         Op_CountPositives,
 580         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 581         Op_CastP2X,
 582         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 583         Op_StrIndexOfChar,
 584         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 585       };
 586 
 587       const int others_len = sizeof(others) / sizeof(others[0]);
 588       int i = 0;
 589       for (; i < others_len; i++) {
 590         if (others[i].opcode == n->Opcode()) {
 591           break;
 592         }
 593       }
 594       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 595       if (i != others_len) {
 596         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 597         for (uint j = 0; j < inputs_len; j++) {
 598           int pos = others[i].inputs[j].pos;
 599           if (pos == -1) {
 600             break;
 601           }
 602           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 603             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 604           }
 605         }
 606         for (uint j = 1; j < stop; j++) {
 607           if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
 608               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 609             uint k = 0;
 610             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 611             if (k == inputs_len) {
 612               fatal("arg %d for node %s not covered", j, n->Name());
 613             }
 614           }
 615         }
 616       } else {
 617         for (uint j = 1; j < stop; j++) {
 618           if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
 619               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 620             fatal("%s not covered", n->Name());
 621           }
 622         }
 623       }
 624     }
 625 
 626     if (n->is_SafePoint()) {
 627       SafePointNode* sfpt = n->as_SafePoint();
 628       if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
 629         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 630           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 631             phis.clear();
 632             visited.reset();
 633           }
 634         }
 635       }
 636     }
 637   }
 638 
 639   if (verify_no_useless_barrier) {
 640     for (int i = 0; i < barriers.length(); i++) {
 641       Node* n = barriers.at(i);
 642       if (!barriers_used.member(n)) {
 643         tty->print("XXX useless barrier"); n->dump(-2);
 644         ShouldNotReachHere();
 645       }
 646     }
 647   }
 648 }
 649 #endif
 650 
 651 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 652   // That both nodes have the same control is not sufficient to prove
 653   // domination; verify that there's no path from d to n.
 654   ResourceMark rm;
 655   Unique_Node_List wq;
 656   wq.push(d);
 657   for (uint next = 0; next < wq.size(); next++) {
 658     Node *m = wq.at(next);
 659     if (m == n) {
 660       return false;
 661     }
 662     if (m->is_Phi() && m->in(0)->is_Loop()) {
 663       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 664     } else {
 665       if (m->is_Store() || m->is_LoadStore()) {
 666         // Take anti-dependencies into account
 667         Node* mem = m->in(MemNode::Memory);
 668         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 669           Node* u = mem->fast_out(i);
 670           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 671               phase->ctrl_or_self(u) == c) {
 672             wq.push(u);
 673           }
 674         }
 675       }
 676       for (uint i = 0; i < m->req(); i++) {
 677         if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
 678           wq.push(m->in(i));
 679         }
 680       }
 681     }
 682   }
 683   return true;
 684 }
 685 
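     // Dominance check that also copes with d and n having the same control:
     // in that case, fall back to the data-graph walk above.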
 686 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 687   if (d_c != n_c) {
 688     return phase->is_dominator(d_c, n_c);
 689   }
 690   return is_dominator_same_ctrl(d_c, d, n, phase);
 691 }
 692 
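     // Step from a memory state to its input memory state for the given alias
     // index, looking through projections, safepoints, membars, phis and MergeMems.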
 693 Node* next_mem(Node* mem, int alias) {
 694   Node* res = nullptr;
 695   if (mem->is_Proj()) {
 696     res = mem->in(0);
 697   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 698     res = mem->in(TypeFunc::Memory);
 699   } else if (mem->is_Phi()) {
 700     res = mem->in(1);
 701   } else if (mem->is_MergeMem()) {
 702     res = mem->as_MergeMem()->memory_at(alias);
 703   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 704     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 705     res = mem->in(MemNode::Memory);
 706   } else {
 707 #ifdef ASSERT
 708     mem->dump();
 709 #endif
 710     ShouldNotReachHere();
 711   }
 712   return res;
 713 }
 714 
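     // Check that the control-flow path from dom down to c contains no branches
     // other than uncommon traps. With allow_one_proj, a single If projection is
     // tolerated and returned; NodeSentinel signals an unsupported shape.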
 715 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 716   Node* iffproj = nullptr;
 717   while (c != dom) {
 718     Node* next = phase->idom(c);
 719     assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 720     if (c->is_Region()) {
 721       ResourceMark rm;
 722       Unique_Node_List wq;
 723       wq.push(c);
 724       for (uint i = 0; i < wq.size(); i++) {
 725         Node *n = wq.at(i);
 726         if (n == next) {
 727           continue;
 728         }
 729         if (n->is_Region()) {
 730           for (uint j = 1; j < n->req(); j++) {
 731             wq.push(n->in(j));
 732           }
 733         } else {
 734           wq.push(n->in(0));
 735         }
 736       }
 737       for (uint i = 0; i < wq.size(); i++) {
 738         Node *n = wq.at(i);
 739         assert(n->is_CFG(), "");
 740         if (n->is_Multi()) {
 741           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 742             Node* u = n->fast_out(j);
 743             if (u->is_CFG()) {
 744               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj()) {
 745                 return NodeSentinel;
 746               }
 747             }
 748           }
 749         }
 750       }
 751     } else if (c->is_Proj()) {
 752       if (c->is_IfProj()) {
 753         if (c->as_Proj()->is_uncommon_trap_if_pattern() != nullptr) {
 754           // continue;
 755         } else {
 756           if (!allow_one_proj) {
 757             return NodeSentinel;
 758           }
 759           if (iffproj == nullptr) {
 760             iffproj = c;
 761           } else {
 762             return NodeSentinel;
 763           }
 764         }
 765       } else if (c->Opcode() == Op_JumpProj) {
 766         return NodeSentinel; // unsupported
 767       } else if (c->Opcode() == Op_CatchProj) {
 768         return NodeSentinel; // unsupported
 769       } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
 770         return NodeSentinel; // unsupported
 771       } else {
 772         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 773       }
 774     }
 775     c = next;
 776   }
 777   return iffproj;
 778 }
 779 
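     // Walk memory inputs from mem until reaching a state whose control strictly
     // dominates ctrl; returns null if the walk cycles.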
 780 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 781   ResourceMark rm;
 782   VectorSet wq;
 783   wq.set(mem->_idx);
 784   mem_ctrl = phase->ctrl_or_self(mem);
 785   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 786     mem = next_mem(mem, alias);
 787     if (wq.test_set(mem->_idx)) {
 788       return nullptr;
 789     }
 790     mem_ctrl = phase->ctrl_or_self(mem);
 791   }
 792   if (mem->is_MergeMem()) {
 793     mem = mem->as_MergeMem()->memory_at(alias);
 794     mem_ctrl = phase->ctrl_or_self(mem);
 795   }
 796   return mem;
 797 }
 798 
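     // Find the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking up
     // the dominator tree and inspecting memory phis and memory projections.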
 799 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 800   Node* mem = nullptr;
 801   Node* c = ctrl;
 802   do {
 803     if (c->is_Region()) {
 804       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
 805         Node* u = c->fast_out(i);
 806         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 807           if (u->adr_type() == TypePtr::BOTTOM) {
 808             mem = u;
 809           }
 810         }
 811       }
 812     } else {
 813       if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
 814         CallProjections projs;
 815         c->as_Call()->extract_projections(&projs, true, false);
 816         if (projs.fallthrough_memproj != nullptr) {
 817           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 818             if (projs.catchall_memproj == nullptr) {
 819               mem = projs.fallthrough_memproj;
 820             } else {
 821               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 822                 mem = projs.fallthrough_memproj;
 823               } else {
 824                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 825                 mem = projs.catchall_memproj;
 826               }
 827             }
 828           }
 829         } else {
 830           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 831           if (proj != nullptr &&
 832               proj->adr_type() == TypePtr::BOTTOM) {
 833             mem = proj;
 834           }
 835         }
 836       } else {
 837         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 838           Node* u = c->fast_out(i);
 839           if (u->is_Proj() &&
 840               u->bottom_type() == Type::MEMORY &&
 841               u->adr_type() == TypePtr::BOTTOM) {
 842             assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 843             assert(mem == nullptr, "only one proj");
 844             mem = u;
 845           }
 846         }
 847         assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
 848       }
 849     }
 850     c = phase->idom(c);
 851   } while (mem == nullptr);
 852   return mem;
 853 }
 854 
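     // Collect the non-CFG uses of n that are pinned at ctrl (excluding loop
     // back-edge phi uses); these are the nodes that must be revisited when the
     // barrier's control flow changes.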
 855 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 856   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 857     Node* u = n->fast_out(i);
 858     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 859       uses.push(u);
 860     }
 861   }
 862 }
 863 
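     // Replace the outer strip-mined loop head and its loop end with a plain
     // LoopNode/IfNode so that expanding barriers inside the loop does not break
     // strip-mining verification.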
 864 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 865   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 866   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 867   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 868   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 869   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 870   phase->lazy_replace(outer, new_outer);
 871   phase->lazy_replace(le, new_le);
 872   inner->clear_strip_mined();
 873 }
 874 
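     // Emit the gc-state test: load the thread-local gc_state byte, AND it with
     // 'flags' and branch on the result being non-zero. On return 'ctrl' is the
     // path where the test succeeded and 'test_fail_ctrl' the path where it failed.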
 875 void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
 876                                                PhaseIdealLoop* phase, int flags) {
 877   PhaseIterGVN& igvn = phase->igvn();
 878   Node* old_ctrl = ctrl;
 879 
 880   Node* thread          = new ThreadLocalNode();
 881   Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 882   Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
 883   Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
 884                                         DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
 885                                         TypeInt::BYTE, MemNode::unordered);
 886   Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
 887   Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
 888   Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);
 889 
 890   IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 891   ctrl                  = new IfTrueNode(gc_state_iff);
 892   test_fail_ctrl        = new IfFalseNode(gc_state_iff);
 893 
 894   IdealLoopTree* loop = phase->get_loop(old_ctrl);
 895   phase->register_control(gc_state_iff,   loop, old_ctrl);
 896   phase->register_control(ctrl,           loop, gc_state_iff);
 897   phase->register_control(test_fail_ctrl, loop, gc_state_iff);
 898 
 899   phase->register_new_node(thread,        old_ctrl);
 900   phase->register_new_node(gc_state_addr, old_ctrl);
 901   phase->register_new_node(gc_state,      old_ctrl);
 902   phase->register_new_node(gc_state_and,  old_ctrl);
 903   phase->register_new_node(gc_state_cmp,  old_ctrl);
 904   phase->register_new_node(gc_state_bool, old_ctrl);
 905 
 906   phase->set_ctrl(gc_state_offset, phase->C->root());
 907 
 908   assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
 909 }
 910 
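     // Emit a null check for val when its type may include null. On return 'ctrl'
     // is the non-null path and 'null_ctrl' the null path; nothing is emitted for
     // provably non-null values.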
 911 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 912   Node* old_ctrl = ctrl;
 913   PhaseIterGVN& igvn = phase->igvn();
 914 
 915   const Type* val_t = igvn.type(val);
 916   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 917     Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
 918     Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);
 919 
 920     IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 921     ctrl             = new IfTrueNode(null_iff);
 922     null_ctrl        = new IfFalseNode(null_iff);
 923 
 924     IdealLoopTree* loop = phase->get_loop(old_ctrl);
 925     phase->register_control(null_iff,  loop, old_ctrl);
 926     phase->register_control(ctrl,      loop, null_iff);
 927     phase->register_control(null_ctrl, loop, null_iff);
 928 
 929     phase->register_new_node(null_cmp,  old_ctrl);
 930     phase->register_new_node(null_test, old_ctrl);
 931   }
 932 }
 933 
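     // Emit the collection set check: index the in-cset fast-test table with the
     // object address shifted by the region size and test the loaded byte. On
     // return 'ctrl' is the in-cset path and 'not_cset_ctrl' the path for objects
     // outside the collection set.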
 934 void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 935   Node* old_ctrl = ctrl;
 936   PhaseIterGVN& igvn = phase->igvn();
 937 
 938   Node* raw_val        = new CastP2XNode(old_ctrl, val);
 939   Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 940 
 941   // Figure out the target cset address with raw pointer math.
 942   // This avoids matching AddP+LoadB that would emit inefficient code.
 943   // See JDK-8245465.
 944   Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 945   Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
 946   Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
 947   Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);
 948 
 949   Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
 950                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
 951                                        TypeInt::BYTE, MemNode::unordered);
 952   Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
 953   Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);
 954 
 955   IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 956   ctrl                 = new IfTrueNode(cset_iff);
 957   not_cset_ctrl        = new IfFalseNode(cset_iff);
 958 
 959   IdealLoopTree* loop = phase->get_loop(old_ctrl);
 960   phase->register_control(cset_iff,      loop, old_ctrl);
 961   phase->register_control(ctrl,          loop, cset_iff);
 962   phase->register_control(not_cset_ctrl, loop, cset_iff);
 963 
 964   phase->set_ctrl(cset_addr_ptr, phase->C->root());
 965 
 966   phase->register_new_node(raw_val,        old_ctrl);
 967   phase->register_new_node(cset_idx,       old_ctrl);
 968   phase->register_new_node(cset_addr,      old_ctrl);
 969   phase->register_new_node(cset_load_addr, old_ctrl);
 970   phase->register_new_node(cset_load_ptr,  old_ctrl);
 971   phase->register_new_node(cset_load,      old_ctrl);
 972   phase->register_new_node(cset_cmp,       old_ctrl);
 973   phase->register_new_node(cset_bool,      old_ctrl);
 974 }
 975 
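     // Emit the slow-path leaf call to the load reference barrier runtime stub.
     // The stub is chosen from the access strength (strong/weak/phantom) and from
     // whether narrow oops are used; 'val' is replaced with the call result.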
 976 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
 977                                                DecoratorSet decorators, PhaseIdealLoop* phase) {
 978   IdealLoopTree* loop = phase->get_loop(ctrl);
 979   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
 980 
 981   address calladdr = nullptr;
 982   const char* name = nullptr;
 983   bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
 984   bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
 985   bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
 986   bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
 987   bool is_narrow  = UseCompressedOops && !is_native;
 988   if (is_strong) {
 989     if (is_narrow) {
 990       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
 991       name = "load_reference_barrier_strong_narrow";
 992     } else {
 993       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
 994       name = "load_reference_barrier_strong";
 995     }
 996   } else if (is_weak) {
 997     if (is_narrow) {
 998       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
 999       name = "load_reference_barrier_weak_narrow";
1000     } else {
1001       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
1002       name = "load_reference_barrier_weak";
1003     }
1004   } else {
1005     assert(is_phantom, "only remaining strength");
1006     if (is_narrow) {
1007       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1008       name = "load_reference_barrier_phantom_narrow";
1009     } else {
1010       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
1011       name = "load_reference_barrier_phantom";
1012     }
1013   }
1014   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1015 
1016   call->init_req(TypeFunc::Control, ctrl);
1017   call->init_req(TypeFunc::I_O, phase->C->top());
1018   call->init_req(TypeFunc::Memory, phase->C->top());
1019   call->init_req(TypeFunc::FramePtr, phase->C->top());
1020   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1021   call->init_req(TypeFunc::Parms, val);
1022   call->init_req(TypeFunc::Parms+1, load_addr);
1023   phase->register_control(call, loop, ctrl);
1024   ctrl = new ProjNode(call, TypeFunc::Control);
1025   phase->register_control(ctrl, loop, call);
1026   val = new ProjNode(call, TypeFunc::Parms);
1027   phase->register_new_node(val, call);
1028   val = new CheckCastPPNode(ctrl, val, obj_type);
1029   phase->register_new_node(val, ctrl);
1030 }
1031 
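     // After a barrier has been expanded into explicit control flow ending at
     // 'region', rewire the nodes that were control dependent on the barrier's
     // original control so they now hang off 'region'.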
1032 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1033   Node* ctrl = phase->get_ctrl(barrier);
1034   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1035 
1036   // Update the control of all nodes that should be after the
1037   // barrier control flow
1038   uses.clear();
1039   // Every node that is control dependent on the barrier's input
1040   // control will be after the expanded barrier. The raw memory (if
1041   // it is control dependent on the barrier's input control)
1042   // must stay above the barrier.
1043   uses_to_ignore.clear();
1044   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1045     uses_to_ignore.push(init_raw_mem);
1046   }
1047   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1048     Node *n = uses_to_ignore.at(next);
1049     for (uint i = 0; i < n->req(); i++) {
1050       Node* in = n->in(i);
1051       if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1052         uses_to_ignore.push(in);
1053       }
1054     }
1055   }
1056   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1057     Node* u = ctrl->fast_out(i);
1058     if (u->_idx < last &&
1059         u != barrier &&
1060         !uses_to_ignore.member(u) &&
1061         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1062         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1063       Node* old_c = phase->ctrl_or_self(u);
1064       Node* c = old_c;
1065       if (c != ctrl ||
1066           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1067           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1068         phase->igvn().rehash_node_delayed(u);
1069         int nb = u->replace_edge(ctrl, region, &phase->igvn());
1070         if (u->is_CFG()) {
1071           if (phase->idom(u) == ctrl) {
1072             phase->set_idom(u, region, phase->dom_depth(region));
1073           }
1074         } else if (phase->get_ctrl(u) == ctrl) {
1075           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1076           uses.push(u);
1077         }
1078         assert(nb == 1, "more than 1 ctrl input?");
1079         --i, imax -= nb;
1080       }
1081     }
1082   }
1083 }
1084 
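     // A node pinned after a call was cloned for the exception path. Build phis at
     // the regions between the call and 'ctrl' that merge the original node (on the
     // fall-through path) with its clone (on the catch-all path).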
1085 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1086   Node* region = nullptr;
1087   while (c != ctrl) {
1088     if (c->is_Region()) {
1089       region = c;
1090     }
1091     c = phase->idom(c);
1092   }
1093   assert(region != nullptr, "");
1094   Node* phi = new PhiNode(region, n->bottom_type());
1095   for (uint j = 1; j < region->req(); j++) {
1096     Node* in = region->in(j);
1097     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1098       phi->init_req(j, n);
1099     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1100       phi->init_req(j, n_clone);
1101     } else {
1102       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1103     }
1104   }
1105   phase->register_new_node(phi, region);
1106   return phi;
1107 }
1108 
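     // Pin barriers at controls where they can safely be expanded: un-strip-mine
     // loops that would be broken by expansion, move barriers above rethrow calls,
     // and clone them across Java call projections so each barrier sees a single
     // well-defined control, then expand them.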
1109 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1110   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1111 
1112   Unique_Node_List uses;
1113   for (int i = 0; i < state->iu_barriers_count(); i++) {
1114     Node* barrier = state->iu_barrier(i);
1115     Node* ctrl = phase->get_ctrl(barrier);
1116     IdealLoopTree* loop = phase->get_loop(ctrl);
1117     Node* head = loop->head();
1118     if (head->is_OuterStripMinedLoop()) {
1119       // Expanding a barrier here will break loop strip mining
1120       // verification. Transform the loop so the loop nest doesn't
1121       // appear as strip mined.
1122       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1123       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1124     }
1125   }
1126 
1127   Node_Stack stack(0);
1128   Node_List clones;
1129   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1130     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1131 
1132     Node* ctrl = phase->get_ctrl(lrb);
1133     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1134 
1135     CallStaticJavaNode* unc = nullptr;
1136     Node* unc_ctrl = nullptr;
1137     Node* uncasted_val = val;
1138 
1139     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1140       Node* u = lrb->fast_out(i);
1141       if (u->Opcode() == Op_CastPP &&
1142           u->in(0) != nullptr &&
1143           phase->is_dominator(u->in(0), ctrl)) {
1144         const Type* u_t = phase->igvn().type(u);
1145 
1146         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1147             u->in(0)->Opcode() == Op_IfTrue &&
1148             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern() &&
1149             u->in(0)->in(0)->is_If() &&
1150             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1151             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1152             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1153             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1154             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1155           IdealLoopTree* loop = phase->get_loop(ctrl);
1156           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1157 
1158           if (!unc_loop->is_member(loop)) {
1159             continue;
1160           }
1161 
1162           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1163           assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1164           if (branch == NodeSentinel) {
1165             continue;
1166           }
1167 
1168           Node* iff = u->in(0)->in(0);
1169           Node* bol = iff->in(1)->clone();
1170           Node* cmp = bol->in(1)->clone();
1171           cmp->set_req(1, lrb);
1172           bol->set_req(1, cmp);
1173           phase->igvn().replace_input_of(iff, 1, bol);
1174           phase->set_ctrl(lrb, iff->in(0));
1175           phase->register_new_node(cmp, iff->in(0));
1176           phase->register_new_node(bol, iff->in(0));
1177           break;
1178         }
1179       }
1180     }
1181     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1182       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1183       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1184         // The rethrow call may have too many projections to be
1185         // properly handled here. Given there's no reason for a
1186         // barrier to depend on the call, move it above the call
1187         stack.push(lrb, 0);
1188         do {
1189           Node* n = stack.node();
1190           uint idx = stack.index();
1191           if (idx < n->req()) {
1192             Node* in = n->in(idx);
1193             stack.set_index(idx+1);
1194             if (in != nullptr) {
1195               if (phase->has_ctrl(in)) {
1196                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1197 #ifdef ASSERT
1198                   for (uint i = 0; i < stack.size(); i++) {
1199                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1200                   }
1201 #endif
1202                   stack.push(in, 0);
1203                 }
1204               } else {
1205                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1206               }
1207             }
1208           } else {
1209             phase->set_ctrl(n, call->in(0));
1210             stack.pop();
1211           }
1212         } while (stack.size() > 0);
1213         continue;
1214       }
1215       CallProjections projs;
1216       call->extract_projections(&projs, false, false);
1217 
1218 #ifdef ASSERT
1219       VectorSet cloned;
1220 #endif
1221       Node* lrb_clone = lrb->clone();
1222       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1223       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1224 
1225       stack.push(lrb, 0);
1226       clones.push(lrb_clone);
1227 
1228       do {
1229         assert(stack.size() == clones.size(), "");
1230         Node* n = stack.node();
1231 #ifdef ASSERT
1232         if (n->is_Load()) {
1233           Node* mem = n->in(MemNode::Memory);
1234           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1235             Node* u = mem->fast_out(j);
1236             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1237           }
1238         }
1239 #endif
1240         uint idx = stack.index();
1241         Node* n_clone = clones.at(clones.size()-1);
1242         if (idx < n->outcnt()) {
1243           Node* u = n->raw_out(idx);
1244           Node* c = phase->ctrl_or_self(u);
1245           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1246             stack.set_index(idx+1);
1247             assert(!u->is_CFG(), "");
1248             stack.push(u, 0);
1249             assert(!cloned.test_set(u->_idx), "only one clone");
1250             Node* u_clone = u->clone();
1251             int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1252             assert(nb > 0, "should have replaced some uses");
1253             phase->register_new_node(u_clone, projs.catchall_catchproj);
1254             clones.push(u_clone);
1255             phase->set_ctrl(u, projs.fallthrough_catchproj);
1256           } else {
1257             bool replaced = false;
1258             if (u->is_Phi()) {
1259               for (uint k = 1; k < u->req(); k++) {
1260                 if (u->in(k) == n) {
1261                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1262                     phase->igvn().replace_input_of(u, k, n_clone);
1263                     replaced = true;
1264                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1265                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1266                     replaced = true;
1267                   }
1268                 }
1269               }
1270             } else {
1271               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1272                 phase->igvn().rehash_node_delayed(u);
1273                 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1274                 assert(nb > 0, "should have replaced some uses");
1275                 replaced = true;
1276               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1277                 if (u->is_If()) {
1278                   // Can't break If/Bool/Cmp chain
1279                   assert(n->is_Bool(), "unexpected If shape");
1280                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1281                   assert(n_clone->is_Bool(), "unexpected clone");
1282                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1283                   Node* bol_clone = n->clone();
1284                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1285                   bol_clone->set_req(1, cmp_clone);
1286 
1287                   Node* nn = stack.node_at(stack.size()-3);
1288                   Node* nn_clone = clones.at(clones.size()-3);
1289                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1290 
1291                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1292                                                    &phase->igvn());
1293                   assert(nb > 0, "should have replaced some uses");
1294 
1295                   phase->register_new_node(bol_clone, u->in(0));
1296                   phase->register_new_node(cmp_clone, u->in(0));
1297 
1298                   phase->igvn().replace_input_of(u, 1, bol_clone);
1299 
1300                 } else {
1301                   phase->igvn().rehash_node_delayed(u);
1302                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1303                   assert(nb > 0, "should have replaced some uses");
1304                 }
1305                 replaced = true;
1306               }
1307             }
1308             if (!replaced) {
1309               stack.set_index(idx+1);
1310             }
1311           }
1312         } else {
1313           stack.pop();
1314           clones.pop();
1315         }
1316       } while (stack.size() > 0);
1317       assert(stack.size() == 0 && clones.size() == 0, "");
1318     }
1319   }
1320 
1321   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1322     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1323     Node* ctrl = phase->get_ctrl(lrb);
1324     IdealLoopTree* loop = phase->get_loop(ctrl);
1325     Node* head = loop->head();
1326     if (head->is_OuterStripMinedLoop()) {
1327       // Expanding a barrier here will break loop strip mining
1328       // verification. Transform the loop so the loop nest doesn't
1329       // appear as strip mined.
1330       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1331       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1332     }
1333   }
1334 
1335   // Expand load-reference-barriers
1336   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1337   Unique_Node_List uses_to_ignore;
1338   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1339     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1340     uint last = phase->C->unique();
1341     Node* ctrl = phase->get_ctrl(lrb);
1342     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1343 
1344     Node* orig_ctrl = ctrl;
1345 
1346     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1347     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1348 
1349     IdealLoopTree *loop = phase->get_loop(ctrl);
1350 
1351     Node* heap_stable_ctrl = nullptr;
1352     Node* null_ctrl = nullptr;
1353 
1354     assert(val->bottom_type()->make_oopptr(), "need oop");
1355     assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1356 
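         // The expanded barrier merges up to three paths through one
         // region/phi pair: heap stable (no forwarded objects), value not in
         // the collection set, and the evacuation slow path that calls the
         // LRB stub.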
1357     enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1358     Node* region = new RegionNode(PATH_LIMIT);
1359     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1360 
1361     // Stable path.
1362     int flags = ShenandoahHeap::HAS_FORWARDED;
1363     if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1364       flags |= ShenandoahHeap::WEAK_ROOTS;
1365     }
1366     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1367     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1368 
1369     // Heap stable case
1370     region->init_req(_heap_stable, heap_stable_ctrl);
1371     val_phi->init_req(_heap_stable, val);
1372 
1373     // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
1374     // even for non-cset objects to prevent resurrection of such objects.
1375     // Wires !in_cset(obj) to the _not_cset slot of the region and phis.
1376     Node* not_cset_ctrl = nullptr;
1377     if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1378       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1379     }
1380     if (not_cset_ctrl != nullptr) {
1381       region->init_req(_not_cset, not_cset_ctrl);
1382       val_phi->init_req(_not_cset, val);
1383     } else {
1384       region->del_req(_not_cset);
1385       val_phi->del_req(_not_cset);
1386     }
1387 
1388     // Resolve the object when the original value is in the cset:
1389     // unconditionally resolve through the forwarding pointer.
1390 
1391     // Call the LRB stub and wire up that path in the _evac_path slot.
1392     Node* result_mem = nullptr;
1393 
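         // With ShenandoahSelfFixing, recover the address the value was
         // loaded from so the stub can also heal that location; otherwise
         // pass a null address.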
1394     Node* addr;
1395     if (ShenandoahSelfFixing) {
1396       VectorSet visited;
1397       addr = get_load_addr(phase, visited, lrb);
1398     } else {
1399       addr = phase->igvn().zerocon(T_OBJECT);
1400     }
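         // Rebuild the address on a CheckCastPP of its base pinned at ctrl,
         // handling both the flat field access shape (base == address) and
         // the base + index + offset shape with an intermediate AddP.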
1401     if (addr->Opcode() == Op_AddP) {
1402       Node* orig_base = addr->in(AddPNode::Base);
1403       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1404       phase->register_new_node(base, ctrl);
1405       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1406         // Field access
1407         addr = addr->clone();
1408         addr->set_req(AddPNode::Base, base);
1409         addr->set_req(AddPNode::Address, base);
1410         phase->register_new_node(addr, ctrl);
1411       } else {
1412         Node* addr2 = addr->in(AddPNode::Address);
1413         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1414               addr2->in(AddPNode::Base) == orig_base) {
1415           addr2 = addr2->clone();
1416           addr2->set_req(AddPNode::Base, base);
1417           addr2->set_req(AddPNode::Address, base);
1418           phase->register_new_node(addr2, ctrl);
1419           addr = addr->clone();
1420           addr->set_req(AddPNode::Base, base);
1421           addr->set_req(AddPNode::Address, addr2);
1422           phase->register_new_node(addr, ctrl);
1423         }
1424       }
1425     }
1426     call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1427     region->init_req(_evac_path, ctrl);
1428     val_phi->init_req(_evac_path, val);
1429 
1430     phase->register_control(region, loop, heap_stable_iff);
1431     Node* out_val = val_phi;
1432     phase->register_new_node(val_phi, region);
1433 
1434     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1435 
1436     ctrl = orig_ctrl;
1437 
1438     phase->igvn().replace_node(lrb, out_val);
1439 
1440     follow_barrier_uses(out_val, ctrl, uses, phase);
1441 
1442     for(uint next = 0; next < uses.size(); next++ ) {
1443       Node *n = uses.at(next);
1444       assert(phase->get_ctrl(n) == ctrl, "bad control");
1445       assert(n != raw_mem, "should leave input raw mem above the barrier");
1446       phase->set_ctrl(n, region);
1447       follow_barrier_uses(n, ctrl, uses, phase);
1448     }
1449     fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1450   }
1451   // Done expanding load-reference-barriers.
1452   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1453 
1454   for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
1455     Node* barrier = state->iu_barrier(i);
1456     Node* pre_val = barrier->in(1);
1457 
1458     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1459       ShouldNotReachHere();
1460       continue;
1461     }
1462 
1463     Node* ctrl = phase->get_ctrl(barrier);
1464 
1465     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1466       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1467       ctrl = ctrl->in(0)->in(0);
1468       phase->set_ctrl(barrier, ctrl);
1469     } else if (ctrl->is_CallRuntime()) {
1470       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1471       ctrl = ctrl->in(0);
1472       phase->set_ctrl(barrier, ctrl);
1473     }
1474 
1475     Node* init_ctrl = ctrl;
1476     IdealLoopTree* loop = phase->get_loop(ctrl);
1477     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1478     Node* init_raw_mem = raw_mem;
1479     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1480     Node* heap_stable_ctrl = nullptr;
1481     Node* null_ctrl = nullptr;
1482     uint last = phase->C->unique();
1483 
1484     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1485     Node* region = new RegionNode(PATH_LIMIT);
1486     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1487 
1488     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1489     Node* region2 = new RegionNode(PATH_LIMIT2);
1490     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1491 
1492     // Stable path.
1493     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1494     region->init_req(_heap_stable, heap_stable_ctrl);
1495     phi->init_req(_heap_stable, raw_mem);
1496 
1497     // Null path
1498     Node* reg2_ctrl = nullptr;
1499     test_null(ctrl, pre_val, null_ctrl, phase);
1500     if (null_ctrl != nullptr) {
1501       reg2_ctrl = null_ctrl->in(0);
1502       region2->init_req(_null_path, null_ctrl);
1503       phi2->init_req(_null_path, raw_mem);
1504     } else {
1505       region2->del_req(_null_path);
1506       phi2->del_req(_null_path);
1507     }
1508 
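         // SATB fast path: load the thread-local queue index and buffer.
         // If the queue still has room, store pre_val into the buffer and
         // update the index; otherwise fall through to the runtime call.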
1509     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1510     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1511     Node* thread = new ThreadLocalNode();
1512     phase->register_new_node(thread, ctrl);
1513     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1514     phase->register_new_node(buffer_adr, ctrl);
1515     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1516     phase->register_new_node(index_adr, ctrl);
1517 
1518     BasicType index_bt = TypeX_X->basic_type();
1519     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1520     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1521     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1522     phase->register_new_node(index, ctrl);
1523     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1524     phase->register_new_node(index_cmp, ctrl);
1525     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1526     phase->register_new_node(index_test, ctrl);
1527     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1528     if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
1529     phase->register_control(queue_full_iff, loop, ctrl);
1530     Node* not_full = new IfTrueNode(queue_full_iff);
1531     phase->register_control(not_full, loop, queue_full_iff);
1532     Node* full = new IfFalseNode(queue_full_iff);
1533     phase->register_control(full, loop, queue_full_iff);
1534 
1535     ctrl = not_full;
1536 
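         // The index counts down: step it back by one word and store pre_val
         // at buffer + next_index.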
1537     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1538     phase->register_new_node(next_index, ctrl);
1539 
1540     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1541     phase->register_new_node(buffer, ctrl);
1542     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1543     phase->register_new_node(log_addr, ctrl);
1544     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1545     phase->register_new_node(log_store, ctrl);
1546     // update the index
1547     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1548     phase->register_new_node(index_update, ctrl);
1549 
1550     // Fast-path case
1551     region2->init_req(_fast_path, ctrl);
1552     phi2->init_req(_fast_path, index_update);
1553 
1554     ctrl = full;
1555 
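         // Queue is full: build a MergeMem carrying the raw memory slice and
         // call the ShenandoahRuntime::write_ref_field_pre_entry slow path
         // with pre_val and the current thread.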
1556     Node* base = find_bottom_mem(ctrl, phase);
1557 
1558     MergeMemNode* mm = MergeMemNode::make(base);
1559     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1560     phase->register_new_node(mm, ctrl);
1561 
1562     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1563     call->init_req(TypeFunc::Control, ctrl);
1564     call->init_req(TypeFunc::I_O, phase->C->top());
1565     call->init_req(TypeFunc::Memory, mm);
1566     call->init_req(TypeFunc::FramePtr, phase->C->top());
1567     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1568     call->init_req(TypeFunc::Parms, pre_val);
1569     call->init_req(TypeFunc::Parms+1, thread);
1570     phase->register_control(call, loop, ctrl);
1571 
1572     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1573     phase->register_control(ctrl_proj, loop, call);
1574     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1575     phase->register_new_node(mem_proj, call);
1576 
1577     // Slow-path case
1578     region2->init_req(_slow_path, ctrl_proj);
1579     phi2->init_req(_slow_path, mem_proj);
1580 
1581     phase->register_control(region2, loop, reg2_ctrl);
1582     phase->register_new_node(phi2, region2);
1583 
1584     region->init_req(_heap_unstable, region2);
1585     phi->init_req(_heap_unstable, phi2);
1586 
1587     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1588     phase->register_new_node(phi, region);
1589 
1590     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1591     for(uint next = 0; next < uses.size(); next++ ) {
1592       Node *n = uses.at(next);
1593       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1594       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1595       phase->set_ctrl(n, region);
1596       follow_barrier_uses(n, init_ctrl, uses, phase);
1597     }
1598     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1599 
1600     phase->igvn().replace_node(barrier, pre_val);
1601   }
1602   assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1603 
1604 }
1605 
1606 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1607   if (visited.test_set(in->_idx)) {
1608     return nullptr;
1609   }
1610   switch (in->Opcode()) {
1611     case Op_Proj:
1612       return get_load_addr(phase, visited, in->in(0));
1613     case Op_CastPP:
1614     case Op_CheckCastPP:
1615     case Op_DecodeN:
1616     case Op_EncodeP:
1617       return get_load_addr(phase, visited, in->in(1));
1618     case Op_LoadN:
1619     case Op_LoadP:
1620       return in->in(MemNode::Address);
1621     case Op_CompareAndExchangeN:
1622     case Op_CompareAndExchangeP:
1623     case Op_GetAndSetN:
1624     case Op_GetAndSetP:
1625     case Op_ShenandoahCompareAndExchangeP:
1626     case Op_ShenandoahCompareAndExchangeN:
1627       // Those instructions would just have stored a different
1628       // value into the field. There is no point trying to fix it here.
1629       return phase->igvn().zerocon(T_OBJECT);
1630     case Op_CMoveP:
1631     case Op_CMoveN: {
1632       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1633       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1634       // Handle the unambiguous cases: an address on only one branch, or the same address on both.
1635       if (t != nullptr && f == nullptr) return t;
1636       if (t == nullptr && f != nullptr) return f;
1637       if (t != nullptr && t == f)    return t;
1638       // Ambiguity.
1639       return phase->igvn().zerocon(T_OBJECT);
1640     }
1641     case Op_Phi: {
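           // All phi inputs must report the same address; any disagreement
           // makes the address unknown and a null constant is returned.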
1642       Node* addr = nullptr;
1643       for (uint i = 1; i < in->req(); i++) {
1644         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1645         if (addr == nullptr) {
1646           addr = addr1;
1647         }
1648         if (addr != addr1) {
1649           return phase->igvn().zerocon(T_OBJECT);
1650         }
1651       }
1652       return addr;
1653     }
1654     case Op_ShenandoahLoadReferenceBarrier:
1655       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1656     case Op_ShenandoahIUBarrier:
1657       return get_load_addr(phase, visited, in->in(1));
1658     case Op_CallDynamicJava:
1659     case Op_CallLeaf:
1660     case Op_CallStaticJava:
1661     case Op_ConN:
1662     case Op_ConP:
1663     case Op_Parm:
1664     case Op_CreateEx:
1665       return phase->igvn().zerocon(T_OBJECT);
1666     default:
1667 #ifdef ASSERT
1668       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1669 #endif
1670       return phase->igvn().zerocon(T_OBJECT);
1671   }
1672 
1673 }
1674 
1675 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1676   IdealLoopTree *loop = phase->get_loop(iff);
1677   Node* loop_head = loop->_head;
1678   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1679 
1680   Node* bol = iff->in(1);
1681   Node* cmp = bol->in(1);
1682   Node* andi = cmp->in(1);
1683   Node* load = andi->in(1);
1684 
1685   assert(is_gc_state_load(load), "broken");
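       // If the gc state load isn't already above the loop, clone the
       // load/And/Cmp/Bool chain at the loop entry so the test only uses
       // loop-invariant inputs.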
1686   if (!phase->is_dominator(load->in(0), entry_c)) {
1687     Node* mem_ctrl = nullptr;
1688     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1689     load = load->clone();
1690     load->set_req(MemNode::Memory, mem);
1691     load->set_req(0, entry_c);
1692     phase->register_new_node(load, entry_c);
1693     andi = andi->clone();
1694     andi->set_req(1, load);
1695     phase->register_new_node(andi, entry_c);
1696     cmp = cmp->clone();
1697     cmp->set_req(1, andi);
1698     phase->register_new_node(cmp, entry_c);
1699     bol = bol->clone();
1700     bol->set_req(1, cmp);
1701     phase->register_new_node(bol, entry_c);
1702 
1703     phase->igvn().replace_input_of(iff, 1, bol);
1704   }
1705 }
1706 
1707 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1708   if (!n->is_If() || n->is_CountedLoopEnd()) {
1709     return false;
1710   }
1711   Node* region = n->in(0);
1712 
1713   if (!region->is_Region()) {
1714     return false;
1715   }
1716   Node* dom = phase->idom(region);
1717   if (!dom->is_If()) {
1718     return false;
1719   }
1720 
1721   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1722     return false;
1723   }
1724 
1725   IfNode* dom_if = dom->as_If();
1726   Node* proj_true = dom_if->proj_out(1);
1727   Node* proj_false = dom_if->proj_out(0);
1728 
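       // Back-to-back only if every path into the region comes from one of
       // the dominating if's projections.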
1729   for (uint i = 1; i < region->req(); i++) {
1730     if (phase->is_dominator(proj_true, region->in(i))) {
1731       continue;
1732     }
1733     if (phase->is_dominator(proj_false, region->in(i))) {
1734       continue;
1735     }
1736     return false;
1737   }
1738 
1739   return true;
1740 }
1741 
1742 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1743   assert(is_heap_stable_test(n), "no other tests");
1744   if (identical_backtoback_ifs(n, phase)) {
1745     Node* n_ctrl = n->in(0);
1746     if (phase->can_split_if(n_ctrl)) {
1747       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1748       if (is_heap_stable_test(n)) {
1749         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1750         assert(is_gc_state_load(gc_state_load), "broken");
1751         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1752         assert(is_gc_state_load(dom_gc_state_load), "broken");
1753         if (gc_state_load != dom_gc_state_load) {
1754           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1755         }
1756       }
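           // Feed the test a phi of constants keyed by which projection of
           // the dominating if each region path comes from, then split the
           // if so each path keeps only its reachable branch.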
1757       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1758       Node* proj_true = dom_if->proj_out(1);
1759       Node* proj_false = dom_if->proj_out(0);
1760       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1761       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1762 
1763       for (uint i = 1; i < n_ctrl->req(); i++) {
1764         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1765           bolphi->init_req(i, con_true);
1766         } else {
1767           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1768           bolphi->init_req(i, con_false);
1769         }
1770       }
1771       phase->register_new_node(bolphi, n_ctrl);
1772       phase->igvn().replace_input_of(n, 1, bolphi);
1773       phase->do_split_if(n);
1774     }
1775   }
1776 }
1777 
1778 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1779   // Find first invariant test that doesn't exit the loop
1780   LoopNode *head = loop->_head->as_Loop();
1781   IfNode* unswitch_iff = nullptr;
1782   Node* n = head->in(LoopNode::LoopBackControl);
1783   int loop_has_sfpts = -1;
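       // Walk up the dominator chain from the backedge: a candidate is a
       // heap-stable test in a loop that contains no safepoints (other than
       // leaf calls).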
1784   while (n != head) {
1785     Node* n_dom = phase->idom(n);
1786     if (n->is_Region()) {
1787       if (n_dom->is_If()) {
1788         IfNode* iff = n_dom->as_If();
1789         if (iff->in(1)->is_Bool()) {
1790           BoolNode* bol = iff->in(1)->as_Bool();
1791           if (bol->in(1)->is_Cmp()) {
1792             // If the condition is invariant and not a loop exit,
1793             // then we have found a reason to unswitch.
1794             if (is_heap_stable_test(iff) &&
1795                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1796               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1797               if (loop_has_sfpts == -1) {
1798                 for(uint i = 0; i < loop->_body.size(); i++) {
1799                   Node *m = loop->_body[i];
1800                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1801                     loop_has_sfpts = 1;
1802                     break;
1803                   }
1804                 }
1805                 if (loop_has_sfpts == -1) {
1806                   loop_has_sfpts = 0;
1807                 }
1808               }
1809               if (!loop_has_sfpts) {
1810                 unswitch_iff = iff;
1811               }
1812             }
1813           }
1814         }
1815       }
1816     }
1817     n = n_dom;
1818   }
1819   return unswitch_iff;
1820 }
1821 
1822 
1823 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1824   Node_List heap_stable_tests;
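       // Collect all gc state (heap stable) tests with a DFS over the node
       // graph.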
1825   stack.push(phase->C->start(), 0);
1826   do {
1827     Node* n = stack.node();
1828     uint i = stack.index();
1829 
1830     if (i < n->outcnt()) {
1831       Node* u = n->raw_out(i);
1832       stack.set_index(i+1);
1833       if (!visited.test_set(u->_idx)) {
1834         stack.push(u, 0);
1835       }
1836     } else {
1837       stack.pop();
1838       if (n->is_If() && is_heap_stable_test(n)) {
1839         heap_stable_tests.push(n);
1840       }
1841     }
1842   } while (stack.size() > 0);
1843 
1844   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1845     Node* n = heap_stable_tests.at(i);
1846     assert(is_heap_stable_test(n), "only evacuation test");
1847     merge_back_to_back_tests(n, phase);
1848   }
1849 
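       // If no major progress was made by the merges above, try to unswitch
       // innermost loops on the gc state test: move the test above the loop
       // and let unswitching duplicate the loop body for the stable and
       // unstable cases.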
1850   if (!phase->C->major_progress()) {
1851     VectorSet seen;
1852     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1853       Node* n = heap_stable_tests.at(i);
1854       IdealLoopTree* loop = phase->get_loop(n);
1855       if (loop != phase->ltree_root() &&
1856           loop->_child == nullptr &&
1857           !loop->_irreducible) {
1858         Node* head = loop->_head;
1859         if (head->is_Loop() &&
1860             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1861             !seen.test_set(head->_idx)) {
1862           IfNode* iff = find_unswitching_candidate(loop, phase);
1863           if (iff != nullptr) {
1864             Node* bol = iff->in(1);
1865             if (head->as_Loop()->is_strip_mined()) {
1866               head->as_Loop()->verify_strip_mined(0);
1867             }
1868             move_gc_state_test_out_of_loop(iff, phase);
1869 
1870             AutoNodeBudget node_budget(phase);
1871 
1872             if (loop->policy_unswitching(phase)) {
1873               if (head->as_Loop()->is_strip_mined()) {
1874                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1875                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1876               }
1877               phase->do_unswitching(loop, old_new);
1878             } else {
1879               // Not proceeding with unswitching. Move the load back into
1880               // the loop.
1881               phase->igvn().replace_input_of(iff, 1, bol);
1882             }
1883           }
1884         }
1885       }
1886     }
1887   }
1888 }
1889 
1890 ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) {
1891   ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
1892 }
1893 
1894 const Type* ShenandoahIUBarrierNode::bottom_type() const {
1895   if (in(1) == nullptr || in(1)->is_top()) {
1896     return Type::TOP;
1897   }
1898   const Type* t = in(1)->bottom_type();
1899   if (t == TypePtr::NULL_PTR) {
1900     return t;
1901   }
1902   return t->is_oopptr();
1903 }
1904 
1905 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
1906   if (in(1) == nullptr) {
1907     return Type::TOP;
1908   }
1909   const Type* t = phase->type(in(1));
1910   if (t == Type::TOP) {
1911     return Type::TOP;
1912   }
1913   if (t == TypePtr::NULL_PTR) {
1914     return t;
1915   }
1916   return t->is_oopptr();
1917 }
1918 
1919 int ShenandoahIUBarrierNode::needed(Node* n) {
1920   if (n == nullptr ||
1921       n->is_Allocate() ||
1922       n->Opcode() == Op_ShenandoahIUBarrier ||
1923       n->bottom_type() == TypePtr::NULL_PTR ||
1924       (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) {
1925     return NotNeeded;
1926   }
1927   if (n->is_Phi() ||
1928       n->is_CMove()) {
1929     return MaybeNeeded;
1930   }
1931   return Needed;
1932 }
1933 
1934 Node* ShenandoahIUBarrierNode::next(Node* n) {
1935   for (;;) {
1936     if (n == nullptr) {
1937       return n;
1938     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
1939       return n;
1940     } else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
1941       return n;
1942     } else if (n->is_ConstraintCast() ||
1943                n->Opcode() == Op_DecodeN ||
1944                n->Opcode() == Op_EncodeP) {
1945       n = n->in(1);
1946     } else if (n->is_Proj()) {
1947       n = n->in(0);
1948     } else {
1949       return n;
1950     }
1951   }
1952   ShouldNotReachHere();
1953   return nullptr;
1954 }
1955 
1956 Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
1957   PhaseIterGVN* igvn = phase->is_IterGVN();
1958 
1959   Node* n = next(in(1));
1960 
1961   int cont = needed(n);
1962 
1963   if (cont == NotNeeded) {
1964     return in(1);
1965   } else if (cont == MaybeNeeded) {
1966     if (igvn == nullptr) {
1967       phase->record_for_igvn(this);
1968       return this;
1969     } else {
1970       ResourceMark rm;
1971       Unique_Node_List wq;
1972       uint wq_i = 0;
1973 
1974       for (;;) {
1975         if (n->is_Phi()) {
1976           for (uint i = 1; i < n->req(); i++) {
1977             Node* m = n->in(i);
1978             if (m != nullptr) {
1979               wq.push(m);
1980             }
1981           }
1982         } else {
1983           assert(n->is_CMove(), "nothing else here");
1984           Node* m = n->in(CMoveNode::IfFalse);
1985           wq.push(m);
1986           m = n->in(CMoveNode::IfTrue);
1987           wq.push(m);
1988         }
1989         Node* orig_n = nullptr;
1990         do {
1991           if (wq_i >= wq.size()) {
1992             return in(1);
1993           }
1994           n = wq.at(wq_i);
1995           wq_i++;
1996           orig_n = n;
1997           n = next(n);
1998           cont = needed(n);
1999           if (cont == Needed) {
2000             return this;
2001           }
2002         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2003       }
2004     }
2005   }
2006 
2007   return this;
2008 }
2009 
2010 #ifdef ASSERT
2011 static bool has_never_branch(Node* root) {
2012   for (uint i = 1; i < root->req(); i++) {
2013     Node* in = root->in(i);
2014     if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
2015       return true;
2016     }
2017   }
2018   return false;
2019 }
2020 #endif
2021 
2022 void MemoryGraphFixer::collect_memory_nodes() {
2023   Node_Stack stack(0);
2024   VectorSet visited;
2025   Node_List regions;
2026 
2027   // Walk the raw memory graph and create a mapping from CFG node to
2028   // memory node. Exclude phis for now.
2029   stack.push(_phase->C->root(), 1);
2030   do {
2031     Node* n = stack.node();
2032     int opc = n->Opcode();
2033     uint i = stack.index();
2034     if (i < n->req()) {
2035       Node* mem = nullptr;
2036       if (opc == Op_Root) {
2037         Node* in = n->in(i);
2038         int in_opc = in->Opcode();
2039         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2040           mem = in->in(TypeFunc::Memory);
2041         } else if (in_opc == Op_Halt) {
2042           if (in->in(0)->is_Region()) {
2043             Node* r = in->in(0);
2044             for (uint j = 1; j < r->req(); j++) {
2045               assert(!r->in(j)->is_NeverBranch(), "");
2046             }
2047           } else {
2048             Node* proj = in->in(0);
2049             assert(proj->is_Proj(), "");
2050             Node* in = proj->in(0);
2051             assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2052             if (in->is_CallStaticJava()) {
2053               mem = in->in(TypeFunc::Memory);
2054             } else if (in->Opcode() == Op_Catch) {
2055               Node* call = in->in(0)->in(0);
2056               assert(call->is_Call(), "");
2057               mem = call->in(TypeFunc::Memory);
2058             } else if (in->is_NeverBranch()) {
2059               mem = collect_memory_for_infinite_loop(in);
2060             }
2061           }
2062         } else {
2063 #ifdef ASSERT
2064           n->dump();
2065           in->dump();
2066 #endif
2067           ShouldNotReachHere();
2068         }
2069       } else {
2070         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2071         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2072         mem = n->in(i);
2073       }
2074       i++;
2075       stack.set_index(i);
2076       if (mem == nullptr) {
2077         continue;
2078       }
2079       for (;;) {
2080         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2081           break;
2082         }
2083         if (mem->is_Phi()) {
2084           stack.push(mem, 2);
2085           mem = mem->in(1);
2086         } else if (mem->is_Proj()) {
2087           stack.push(mem, mem->req());
2088           mem = mem->in(0);
2089         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2090           mem = mem->in(TypeFunc::Memory);
2091         } else if (mem->is_MergeMem()) {
2092           MergeMemNode* mm = mem->as_MergeMem();
2093           mem = mm->memory_at(_alias);
2094         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2095           assert(_alias == Compile::AliasIdxRaw, "");
2096           stack.push(mem, mem->req());
2097           mem = mem->in(MemNode::Memory);
2098         } else {
2099 #ifdef ASSERT
2100           mem->dump();
2101 #endif
2102           ShouldNotReachHere();
2103         }
2104       }
2105     } else {
2106       if (n->is_Phi()) {
2107         // Nothing
2108       } else if (!n->is_Root()) {
2109         Node* c = get_ctrl(n);
2110         _memory_nodes.map(c->_idx, n);
2111       }
2112       stack.pop();
2113     }
2114   } while (stack.is_nonempty());
2115 
2116   // Iterate over CFG nodes in rpo and propagate memory state to
2117   // compute memory state at regions, creating new phis if needed.
2118   Node_List rpo_list;
2119   visited.clear();
2120   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2121   Node* root = rpo_list.pop();
2122   assert(root == _phase->C->root(), "");
2123 
2124   const bool trace = false;
2125 #ifdef ASSERT
2126   if (trace) {
2127     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2128       Node* c = rpo_list.at(i);
2129       if (_memory_nodes[c->_idx] != nullptr) {
2130         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2131       }
2132     }
2133   }
2134 #endif
2135   uint last = _phase->C->unique();
2136 
2137 #ifdef ASSERT
2138   uint16_t max_depth = 0;
2139   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2140     IdealLoopTree* lpt = iter.current();
2141     max_depth = MAX2(max_depth, lpt->_nest);
2142   }
2143 #endif
2144 
2145   bool progress = true;
2146   int iteration = 0;
2147   Node_List dead_phis;
2148   while (progress) {
2149     progress = false;
2150     iteration++;
2151     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2152     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2153 
2154     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2155       Node* c = rpo_list.at(i);
2156 
2157       Node* prev_mem = _memory_nodes[c->_idx];
2158       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2159         Node* prev_region = regions[c->_idx];
2160         Node* unique = nullptr;
2161         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2162           Node* m = _memory_nodes[c->in(j)->_idx];
2163           assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2164           if (m != nullptr) {
2165             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2166               assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2167               // continue
2168             } else if (unique == nullptr) {
2169               unique = m;
2170             } else if (m == unique) {
2171               // continue
2172             } else {
2173               unique = NodeSentinel;
2174             }
2175           }
2176         }
2177         assert(unique != nullptr, "empty phi???");
2178         if (unique != NodeSentinel) {
2179           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
2180             dead_phis.push(prev_region);
2181           }
2182           regions.map(c->_idx, unique);
2183         } else {
2184           Node* phi = nullptr;
2185           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2186             phi = prev_region;
2187             for (uint k = 1; k < c->req(); k++) {
2188               Node* m = _memory_nodes[c->in(k)->_idx];
2189               assert(m != nullptr, "expect memory state");
2190               phi->set_req(k, m);
2191             }
2192           } else {
2193             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
2194               Node* u = c->fast_out(j);
2195               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2196                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2197                 phi = u;
2198                 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
2199                   Node* m = _memory_nodes[c->in(k)->_idx];
2200                   assert(m != nullptr, "expect memory state");
2201                   if (u->in(k) != m) {
2202                     phi = NodeSentinel;
2203                   }
2204                 }
2205               }
2206             }
2207             if (phi == NodeSentinel) {
2208               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2209               for (uint k = 1; k < c->req(); k++) {
2210                 Node* m = _memory_nodes[c->in(k)->_idx];
2211                 assert(m != nullptr, "expect memory state");
2212                 phi->init_req(k, m);
2213               }
2214             }
2215           }
2216           if (phi != nullptr) {
2217             regions.map(c->_idx, phi);
2218           } else {
2219             assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2220           }
2221         }
2222         Node* current_region = regions[c->_idx];
2223         if (current_region != prev_region) {
2224           progress = true;
2225           if (prev_region == prev_mem) {
2226             _memory_nodes.map(c->_idx, current_region);
2227           }
2228         }
2229       } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2230         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2231         assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
2232         if (m != prev_mem) {
2233           _memory_nodes.map(c->_idx, m);
2234           progress = true;
2235         }
2236       }
2237 #ifdef ASSERT
2238       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2239 #endif
2240     }
2241   }
2242 
2243   // Replace existing phi with computed memory state for that region
2244   // if different (could be a new phi or a dominating memory node if
2245   // that phi was found to be useless).
2246   while (dead_phis.size() > 0) {
2247     Node* n = dead_phis.pop();
2248     n->replace_by(_phase->C->top());
2249     n->destruct(&_phase->igvn());
2250   }
2251   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2252     Node* c = rpo_list.at(i);
2253     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2254       Node* n = regions[c->_idx];
2255       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2256       if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2257         _phase->register_new_node(n, c);
2258       }
2259     }
2260   }
2261   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2262     Node* c = rpo_list.at(i);
2263     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2264       Node* n = regions[c->_idx];
2265       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2266       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2267         Node* u = c->fast_out(i);
2268         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2269             u != n) {
2270           assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
2271           if (u->adr_type() == TypePtr::BOTTOM) {
2272             fix_memory_uses(u, n, n, c);
2273           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2274             _phase->lazy_replace(u, n);
2275             --i; --imax;
2276           }
2277         }
2278       }
2279     }
2280   }
2281 }
2282 
2283 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
2284   Node* mem = nullptr;
2285   Node* head = in->in(0);
2286   assert(head->is_Region(), "unexpected infinite loop graph shape");
2287 
2288   Node* phi_mem = nullptr;
2289   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2290     Node* u = head->fast_out(j);
2291     if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2292       if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2293         assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2294         phi_mem = u;
2295       } else if (u->adr_type() == TypePtr::BOTTOM) {
2296         assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2297         if (phi_mem == nullptr) {
2298           phi_mem = u;
2299         }
2300       }
2301     }
2302   }
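       // No memory phi on the loop head: walk the control inside the
       // infinite loop until a safepoint (other than a leaf call) provides
       // the memory state for this alias.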
2303   if (phi_mem == nullptr) {
2304     ResourceMark rm;
2305     Node_Stack stack(0);
2306     stack.push(head, 1);
2307     do {
2308       Node* n = stack.node();
2309       uint i = stack.index();
2310       if (i >= n->req()) {
2311         stack.pop();
2312       } else {
2313         stack.set_index(i + 1);
2314         Node* c = n->in(i);
2315         assert(c != head, "should have found a safepoint on the way");
2316         if (stack.size() != 1 || _phase->is_dominator(head, c)) {
2317           for (;;) {
2318             if (c->is_Region()) {
2319               stack.push(c, 1);
2320               break;
2321             } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
2322               Node* m = c->in(TypeFunc::Memory);
2323               if (m->is_MergeMem()) {
2324                 m = m->as_MergeMem()->memory_at(_alias);
2325               }
2326               assert(mem == nullptr || mem == m, "several memory states");
2327               mem = m;
2328               break;
2329             } else {
2330               assert(c != c->in(0), "");
2331               c = c->in(0);
2332             }
2333           }
2334         }
2335       }
2336     } while (stack.size() > 0);
2337     assert(mem != nullptr, "should have found safepoint");
2338   } else {
2339     mem = phi_mem;
2340   }
2341   return mem;
2342 }
2343 
2344 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2345   Node* c = _phase->get_ctrl(n);
2346   if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
2347     assert(c == n->in(0), "");
2348     CallNode* call = c->as_Call();
2349     CallProjections projs;
2350     call->extract_projections(&projs, true, false);
2351     if (projs.catchall_memproj != nullptr) {
2352       if (projs.fallthrough_memproj == n) {
2353         c = projs.fallthrough_catchproj;
2354       } else {
2355         assert(projs.catchall_memproj == n, "");
2356         c = projs.catchall_catchproj;
2357       }
2358     }
2359   }
2360   return c;
2361 }
2362 
2363 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2364   if (_phase->has_ctrl(n))
2365     return get_ctrl(n);
2366   else {
2367     assert (n->is_CFG(), "must be a CFG node");
2368     return n;
2369   }
2370 }
2371 
2372 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2373   return m != nullptr && get_ctrl(m) == c;
2374 }
2375 
2376 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2377   assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
2378   assert(!ctrl->is_Call() || ctrl == n, "projection expected");
2379 #ifdef ASSERT
2380   if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
2381       (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
2382     CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
2383     int mems = 0;
2384     for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
2385       Node* u = call->fast_out(i);
2386       if (u->bottom_type() == Type::MEMORY) {
2387         mems++;
2388       }
2389     }
2390     assert(mems <= 1, "No node right after call if multiple mem projections");
2391   }
2392 #endif
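       // Walk up the dominator tree from ctrl until a memory state whose
       // control matches is found; if n is given, also step past
       // same-control memory defs that n does not depend on.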
2393   Node* mem = _memory_nodes[ctrl->_idx];
2394   Node* c = ctrl;
2395   while (!mem_is_valid(mem, c) &&
2396          (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2397     c = _phase->idom(c);
2398     mem = _memory_nodes[c->_idx];
2399   }
2400   if (n != nullptr && mem_is_valid(mem, c)) {
2401     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2402       mem = next_mem(mem, _alias);
2403     }
2404     if (mem->is_MergeMem()) {
2405       mem = mem->as_MergeMem()->memory_at(_alias);
2406     }
2407     if (!mem_is_valid(mem, c)) {
2408       do {
2409         c = _phase->idom(c);
2410         mem = _memory_nodes[c->_idx];
2411       } while (!mem_is_valid(mem, c) &&
2412                (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2413     }
2414   }
2415   assert(mem->bottom_type() == Type::MEMORY, "");
2416   return mem;
2417 }
2418 
2419 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2420   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2421     Node* use = region->fast_out(i);
2422     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2423         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2424       return true;
2425     }
2426   }
2427   return false;
2428 }
2429 
2430 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2431   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2432   const bool trace = false;
2433   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2434   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2435   GrowableArray<Node*> phis;
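       // If mem_for_ctrl differs from mem, splice new_mem into the chain of
       // memory side effects between them; otherwise propagate the new
       // memory state along the CFG from new_ctrl, adding memory phis at
       // merge points. Memory uses of mem are then rewired below.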
2436   if (mem_for_ctrl != mem) {
2437     Node* old = mem_for_ctrl;
2438     Node* prev = nullptr;
2439     while (old != mem) {
2440       prev = old;
2441       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2442         assert(_alias == Compile::AliasIdxRaw, "");
2443         old = old->in(MemNode::Memory);
2444       } else if (old->Opcode() == Op_SCMemProj) {
2445         assert(_alias == Compile::AliasIdxRaw, "");
2446         old = old->in(0);
2447       } else {
2448         ShouldNotReachHere();
2449       }
2450     }
2451     assert(prev != nullptr, "");
2452     if (new_ctrl != ctrl) {
2453       _memory_nodes.map(ctrl->_idx, mem);
2454       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2455     }
2456     uint input = (uint)MemNode::Memory;
2457     _phase->igvn().replace_input_of(prev, input, new_mem);
2458   } else {
2459     uses.clear();
2460     _memory_nodes.map(new_ctrl->_idx, new_mem);
2461     uses.push(new_ctrl);
2462     for(uint next = 0; next < uses.size(); next++ ) {
2463       Node *n = uses.at(next);
2464       assert(n->is_CFG(), "");
2465       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2466       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2467         Node* u = n->fast_out(i);
2468         if (!u->is_Root() && u->is_CFG() && u != n) {
2469           Node* m = _memory_nodes[u->_idx];
2470           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2471               !has_mem_phi(u) &&
2472               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2473             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2474             DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
2475 
2476             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2477               bool push = true;
2478               bool create_phi = true;
2479               if (_phase->is_dominator(new_ctrl, u)) {
2480                 create_phi = false;
2481               }
2482               if (create_phi) {
2483                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2484                 _phase->register_new_node(phi, u);
2485                 phis.push(phi);
2486                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2487                 if (!mem_is_valid(m, u)) {
2488                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2489                   _memory_nodes.map(u->_idx, phi);
2490                 } else {
2491                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2492                   for (;;) {
2493                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2494                     Node* next = nullptr;
2495                     if (m->is_Proj()) {
2496                       next = m->in(0);
2497                     } else {
2498                       assert(m->is_Mem() || m->is_LoadStore(), "");
2499                       assert(_alias == Compile::AliasIdxRaw, "");
2500                       next = m->in(MemNode::Memory);
2501                     }
2502                     if (_phase->get_ctrl(next) != u) {
2503                       break;
2504                     }
2505                     if (next->is_MergeMem()) {
2506                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2507                       break;
2508                     }
2509                     if (next->is_Phi()) {
2510                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2511                       break;
2512                     }
2513                     m = next;
2514                   }
2515 
2516                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2517                   assert(m->is_Mem() || m->is_LoadStore(), "");
2518                   uint input = (uint)MemNode::Memory;
2519                   _phase->igvn().replace_input_of(m, input, phi);
2520                   push = false;
2521                 }
2522               } else {
2523                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2524               }
2525               if (push) {
2526                 uses.push(u);
2527               }
2528             }
2529           } else if (!mem_is_valid(m, u) &&
2530                      !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
2531             uses.push(u);
2532           }
2533         }
2534       }
2535     }
2536     for (int i = 0; i < phis.length(); i++) {
2537       Node* n = phis.at(i);
2538       Node* r = n->in(0);
2539       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2540       for (uint j = 1; j < n->req(); j++) {
2541         Node* m = find_mem(r->in(j), nullptr);
2542         _phase->igvn().replace_input_of(n, j, m);
2543         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2544       }
2545     }
2546   }
2547   uint last = _phase->C->unique();
2548   MergeMemNode* mm = nullptr;
2549   int alias = _alias;
2550   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2551   // Process loads first to not miss an anti-dependency: if the memory
2552   // edge of a store is updated before a load is processed then an
2553   // anti-dependency may be missed.
2554   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2555     Node* u = mem->out(i);
2556     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2557       Node* m = find_mem(_phase->get_ctrl(u), u);
2558       if (m != mem) {
2559         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2560         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2561         --i;
2562       }
2563     }
2564   }
2565   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2566     Node* u = mem->out(i);
2567     if (u->_idx < last) {
2568       if (u->is_Mem()) {
2569         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2570           Node* m = find_mem(_phase->get_ctrl(u), u);
2571           if (m != mem) {
2572             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2573             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2574             --i;
2575           }
2576         }
2577       } else if (u->is_MergeMem()) {
2578         MergeMemNode* u_mm = u->as_MergeMem();
2579         if (u_mm->memory_at(alias) == mem) {
2580           MergeMemNode* newmm = nullptr;
2581           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2582             Node* uu = u->fast_out(j);
2583             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2584             if (uu->is_Phi()) {
2585               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2586               Node* region = uu->in(0);
2587               int nb = 0;
2588               for (uint k = 1; k < uu->req(); k++) {
2589                 if (uu->in(k) == u) {
2590                   Node* m = find_mem(region->in(k), nullptr);
2591                   if (m != mem) {
2592                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2593                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2594                     if (newmm != u) {
2595                       _phase->igvn().replace_input_of(uu, k, newmm);
2596                       nb++;
2597                       --jmax;
2598                     }
2599                   }
2600                 }
2601               }
2602               if (nb > 0) {
2603                 --j;
2604               }
2605             } else {
2606               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2607               if (m != mem) {
2608                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2609                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2610                 if (newmm != u) {
2611                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2612                   --j, --jmax;
2613                 }
2614               }
2615             }
2616           }
2617         }
2618       } else if (u->is_Phi()) {
2619         assert(u->bottom_type() == Type::MEMORY, "what else?");
2620         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2621           Node* region = u->in(0);
2622           bool replaced = false;
2623           for (uint j = 1; j < u->req(); j++) {
2624             if (u->in(j) == mem) {
2625               Node* m = find_mem(region->in(j), nullptr);
2626               Node* nnew = m;
2627               if (m != mem) {
2628                 if (u->adr_type() == TypePtr::BOTTOM) {
2629                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2630                   nnew = mm;
2631                 }
2632                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2633                 _phase->igvn().replace_input_of(u, j, nnew);
2634                 replaced = true;
2635               }
2636             }
2637           }
2638           if (replaced) {
2639             --i;
2640           }
2641         }
2642       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2643                  u->adr_type() == nullptr) {
2644         assert(u->adr_type() != nullptr ||
2645                u->Opcode() == Op_Rethrow ||
2646                u->Opcode() == Op_Return ||
2647                u->Opcode() == Op_SafePoint ||
2648                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2649                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2650                u->Opcode() == Op_CallLeaf, "");
2651         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2652         if (m != mem) {
2653           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2654           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2655           --i;
2656         }
2657       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2658         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2659         if (m != mem) {
2660           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2661           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2662           --i;
2663         }
2664       } else if (u->adr_type() != TypePtr::BOTTOM &&
2665                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2666         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2667         assert(m != mem, "");
2668         // u is on the wrong slice...
2669         assert(u->is_ClearArray(), "");
2670         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2671         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2672         --i;
2673       }
2674     }
2675   }
2676 #ifdef ASSERT
2677   assert(new_mem->outcnt() > 0, "");
2678   for (int i = 0; i < phis.length(); i++) {
2679     Node* n = phis.at(i);
2680     assert(n->outcnt() > 0, "new phi must have uses now");
2681   }
2682 #endif
2683 }
2684 
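     // Record the memory state for a newly created control node: ctrl keeps
     // mem as its memory state, while new_ctrl is mapped to mem_for_ctrl
     // (no-op if the two states or the two controls are identical).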
2685 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2686   if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2687     _memory_nodes.map(ctrl->_idx, mem);
2688     _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2689   }
2690 }
2691 
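     // Build a MergeMem based on mem, with the slice for _alias replaced by
     // rep_proj, and register the new node at rep_ctrl.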
2692 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2693   MergeMemNode* mm = MergeMemNode::make(mem);
2694   mm->set_memory_at(_alias, rep_proj);
2695   _phase->register_new_node(mm, rep_ctrl);
2696   return mm;
2697 }
2698 
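     // Produce a MergeMem equivalent to u but with the _alias slice set to
     // rep_proj. If u has a single use it is updated in place (and its
     // control adjusted); otherwise a fresh MergeMem is created so that
     // other uses of u keep seeing the old memory state.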
2699 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2700   MergeMemNode* newmm = nullptr;
2701   MergeMemNode* u_mm = u->as_MergeMem();
2702   Node* c = _phase->get_ctrl(u);
2703   if (_phase->is_dominator(c, rep_ctrl)) {
2704     c = rep_ctrl;
2705   } else {
2706     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2707   }
2708   if (u->outcnt() == 1) {
2709     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2710       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2711       --i;
2712     } else {
2713       _phase->igvn().rehash_node_delayed(u);
2714       u_mm->set_memory_at(_alias, rep_proj);
2715     }
2716     newmm = u_mm;
2717     _phase->set_ctrl_and_loop(u, c);
2718   } else {
2719     // We can't simply clone u and then change one of its inputs:
2720     // that would add and then remove an edge, which invalidates the
2721     // DUIterator.
2722     newmm = MergeMemNode::make(u_mm->base_memory());
2723     for (uint j = 0; j < u->req(); j++) {
2724       if (j < newmm->req()) {
2725         if (j == (uint)_alias) {
2726           newmm->set_req(j, rep_proj);
2727         } else if (newmm->in(j) != u->in(j)) {
2728           newmm->set_req(j, u->in(j));
2729         }
2730       } else if (j == (uint)_alias) {
2731         newmm->add_req(rep_proj);
2732       } else {
2733         newmm->add_req(u->in(j));
2734       }
2735     }
2736     if ((uint)_alias >= u->req()) {
2737       newmm->set_memory_at(_alias, rep_proj);
2738     }
2739     _phase->register_new_node(newmm, c);
2740   }
2741   return newmm;
2742 }
2743 
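     // A memory Phi is a candidate for rewiring if it is on the slice being
     // fixed, or if it is a bottom-memory Phi whose region has no separate
     // Phi dedicated to that slice.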
2744 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2745   if (phi->adr_type() == TypePtr::BOTTOM) {
2746     Node* region = phi->in(0);
2747     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2748       Node* uu = region->fast_out(j);
2749       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2750         return false;
2751       }
2752     }
2753     return true;
2754   }
2755   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2756 }
2757 
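     // Rewire uses of the old memory state mem that are dominated by
     // rep_ctrl so that they see rep_proj instead, wrapping rep_proj in a
     // MergeMem where a use expects wide (bottom) memory.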
2758 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2759   uint last = _phase->C->unique();
2760   MergeMemNode* mm = nullptr;
2761   assert(mem->bottom_type() == Type::MEMORY, "");
2762   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2763     Node* u = mem->out(i);
2764     if (u != replacement && u->_idx < last) {
2765       if (u->is_MergeMem()) {
2766         MergeMemNode* u_mm = u->as_MergeMem();
2767         if (u_mm->memory_at(_alias) == mem) {
2768           MergeMemNode* newmm = nullptr;
2769           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2770             Node* uu = u->fast_out(j);
2771             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2772             if (uu->is_Phi()) {
2773               if (should_process_phi(uu)) {
2774                 Node* region = uu->in(0);
2775                 int nb = 0;
2776                 for (uint k = 1; k < uu->req(); k++) {
2777                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2778                     if (newmm == nullptr) {
2779                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2780                     }
2781                     if (newmm != u) {
2782                       _phase->igvn().replace_input_of(uu, k, newmm);
2783                       nb++;
2784                       --jmax;
2785                     }
2786                   }
2787                 }
2788                 if (nb > 0) {
2789                   --j;
2790                 }
2791               }
2792             } else {
2793               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2794                 if (newmm == nullptr) {
2795                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2796                 }
2797                 if (newmm != u) {
2798                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2799                   --j, --jmax;
2800                 }
2801               }
2802             }
2803           }
2804         }
2805       } else if (u->is_Phi()) {
2806         assert(u->bottom_type() == Type::MEMORY, "what else?");
2807         Node* region = u->in(0);
2808         if (should_process_phi(u)) {
2809           bool replaced = false;
2810           for (uint j = 1; j < u->req(); j++) {
2811             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2812               Node* nnew = rep_proj;
2813               if (u->adr_type() == TypePtr::BOTTOM) {
2814                 if (mm == nullptr) {
2815                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2816                 }
2817                 nnew = mm;
2818               }
2819               _phase->igvn().replace_input_of(u, j, nnew);
2820               replaced = true;
2821             }
2822           }
2823           if (replaced) {
2824             --i;
2825           }
2826 
2827         }
2828       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2829                  u->adr_type() == nullptr) {
2830         assert(u->adr_type() != nullptr ||
2831                u->Opcode() == Op_Rethrow ||
2832                u->Opcode() == Op_Return ||
2833                u->Opcode() == Op_SafePoint ||
2834                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2835                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2836                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2837         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2838           if (mm == nullptr) {
2839             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2840           }
2841           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2842           --i;
2843         }
2844       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2845         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2846           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2847           --i;
2848         }
2849       }
2850     }
2851   }
2852 }
2853 
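     // Constructing a load-reference barrier registers it with the
     // ShenandoahBarrierSetC2 state, so the expansion pass can later find
     // every barrier that was added.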
2854 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
2855 : Node(ctrl, obj), _decorators(decorators) {
2856   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2857 }
2858 
2859 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
2860   return _decorators;
2861 }
2862 
2863 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2864   return sizeof(*this);
2865 }
2866 
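     // Keep only the decorators that distinguish barrier semantics for
     // value-numbering purposes: reference strength (strong, weak, phantom,
     // unknown) and whether the access is to native (off-heap) memory.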
2867 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2868   return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2869 }
2870 
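     // hash() and cmp() fold in the masked decorators so that GVN only
     // commons barriers with equivalent semantics.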
2871 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2872   uint hash = Node::hash();
2873   hash += mask_decorators(_decorators);
2874   return hash;
2875 }
2876 
2877 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
2878   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2879          mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2880 }
2881 
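     // The barrier has the same type as its input value, except that a
     // non-strong (e.g. weak or phantom) access may also produce null, so
     // the type is met with NULL_PTR in that case.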
2882 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2883   if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2884     return Type::TOP;
2885   }
2886   const Type* t = in(ValueIn)->bottom_type();
2887   if (t == TypePtr::NULL_PTR) {
2888     return t;
2889   }
2890 
2891   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2892     return t;
2893   }
2894 
2895   return t->meet(TypePtr::NULL_PTR);
2896 }
2897 
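     // Value() applies the same rule as bottom_type(), but to the
     // phase-computed type of the input value.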
2898 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2899   // If the input value is TOP, the result is TOP.
2900   const Type* t2 = phase->type(in(ValueIn));
2901   if (t2 == Type::TOP) return Type::TOP;
2902 
2903   if (t2 == TypePtr::NULL_PTR) {
2904     return t2;
2905   }
2906 
2907   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2908     return t2;
2909   }
2910 
2911   return t2->meet(TypePtr::NULL_PTR);
2912 }
2913 
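     // The barrier folds away when its input value provably never needs a
     // barrier (see needs_barrier_impl() below).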
2914 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2915   Node* value = in(ValueIn);
2916   if (!needs_barrier(phase, value)) {
2917     return value;
2918   }
2919   return this;
2920 }
2921 
2922 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2923   Unique_Node_List visited;
2924   return needs_barrier_impl(phase, n, visited);
2925 }
2926 
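     // Conservative walk up the def chain (the visited set guards against
     // cycles through Phis): returns false for values that never need a
     // barrier (null, constant oops, fresh allocations, values that already
     // passed through a barrier, ...), true otherwise. Unexpected node
     // types assert in debug builds and conservatively return true in
     // product builds.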
2927 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2928   if (n == nullptr) return false;
2929   if (visited.member(n)) {
2930     return false; // Been there.
2931   }
2932   visited.push(n);
2933 
2934   if (n->is_Allocate()) {
2935     // tty->print_cr("optimize barrier on alloc");
2936     return false;
2937   }
2938   if (n->is_Call()) {
2939     // tty->print_cr("optimize barrier on call");
2940     return false;
2941   }
2942 
2943   const Type* type = phase->type(n);
2944   if (type == Type::TOP) {
2945     return false;
2946   }
2947   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2948     // tty->print_cr("optimize barrier on null");
2949     return false;
2950   }
2951   if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
2952     // tty->print_cr("optimize barrier on constant");
2953     return false;
2954   }
2955 
2956   switch (n->Opcode()) {
2957     case Op_AddP:
2958       return true; // TODO: Can refine?
2959     case Op_LoadP:
2960     case Op_ShenandoahCompareAndExchangeN:
2961     case Op_ShenandoahCompareAndExchangeP:
2962     case Op_CompareAndExchangeN:
2963     case Op_CompareAndExchangeP:
2964     case Op_GetAndSetN:
2965     case Op_GetAndSetP:
2966       return true;
2967     case Op_Phi: {
2968       for (uint i = 1; i < n->req(); i++) {
2969         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2970       }
2971       return false;
2972     }
2973     case Op_CheckCastPP:
2974     case Op_CastPP:
2975       return needs_barrier_impl(phase, n->in(1), visited);
2976     case Op_Proj:
2977       return needs_barrier_impl(phase, n->in(0), visited);
2978     case Op_ShenandoahLoadReferenceBarrier:
2979       // tty->print_cr("optimize barrier on barrier");
2980       return false;
2981     case Op_Parm:
2982       // tty->print_cr("optimize barrier on input arg");
2983       return false;
2984     case Op_DecodeN:
2985     case Op_EncodeP:
2986       return needs_barrier_impl(phase, n->in(1), visited);
2987     case Op_LoadN:
2988       return true;
2989     case Op_CMoveN:
2990     case Op_CMoveP:
2991       return needs_barrier_impl(phase, n->in(2), visited) ||
2992              needs_barrier_impl(phase, n->in(3), visited);
2993     case Op_ShenandoahIUBarrier:
2994       return needs_barrier_impl(phase, n->in(1), visited);
2995     case Op_CreateEx:
2996       return false;
2997     default:
2998       break;
2999   }
3000 #ifdef ASSERT
3001   tty->print("need barrier on?: ");
3002   tty->print_cr("ins:");
3003   n->dump(2);
3004   tty->print_cr("outs:");
3005   n->dump(-2);
3006   ShouldNotReachHere();
3007 #endif
3008   return true;
3009 }