/*
 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/javaClasses.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

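// Entry point for barrier expansion. If any IU or load reference barriers were
// recorded, temporarily re-open the post-loop-opts phase, run the dedicated
// LoopOptsShenandoahExpand and LoopOptsShenandoahPostExpand loop passes, and
// finish with a round of post-loop-opts IGVN. Returns false if the compilation
// failed along the way.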
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->iu_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    assert(C->post_loop_opts_phase(), "no loop opts allowed");
    C->reset_post_loop_opts_phase(); // ... but we know what we are doing
    C->clear_major_progress();
    PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;

    C->set_major_progress();
    if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
      return false;
    }
    C->clear_major_progress();
    C->process_for_post_loop_opts_igvn(igvn);
    if (C->failing()) return false;

    C->set_post_loop_opts_phase(); // now for real!
  }
  return true;
}

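// Recognizes the gc-state test shape emitted by test_gc_state() below:
//   If(Bool(ne, CmpI(AndI(LoadB(ThreadLocal + gc_state_offset), mask), 0)))
// i.e. "is any of the given gc-state flag bits set for the current thread?".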
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

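// Recognizes a LoadB/LoadUB of the per-thread gc-state byte, i.e. a load from
// ThreadLocal + ShenandoahThreadLocalData::gc_state_offset().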
bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

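// Walks the control flow backwards from start to stop (which must dominate
// start) and reports whether a safepoint, other than a leaf call, can be
// reached on any path in between.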
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

#ifdef ASSERT
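// Verification helper: follows the value 'in' through casts, AddPs, phis,
// CMoves, etc. and checks that it traces back to something that does not need
// a barrier (null, constant, parameter, freshly allocated object, ...) or to a
// ShenandoahLoadReferenceBarrier/ShenandoahIUBarrier of the kind required by
// 't'. Barriers that are encountered are collected in 'barriers_used'.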
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahIUBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != nullptr) {
    n1->dump(+10);
  }
  if (n2 != nullptr) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

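// Walks the whole graph from the root and checks, node kind by node kind
// (loads, stores, compares, load-stores, runtime and intrinsic calls, ...),
// that every oop input which needs a barrier actually has one. Unknown calls
// or nodes with uncovered oop inputs are reported with fatal().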
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "array_partition_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore },   { -1, ShenandoahNone },
          { -1, ShenandoahNone },                { -1, ShenandoahNone },                  { -1, ShenandoahNone } },
        "arraysort_stub",
        { { TypeFunc::Parms, ShenandoahStore },  { -1, ShenandoahNone },                  { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "shenandoah_clone",
        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = nullptr;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      if (m->is_Store() || m->is_LoadStore()) {
        // Take anti-dependencies into account
        Node* mem = m->in(MemNode::Memory);
        for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
          Node* u = mem->fast_out(i);
          if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
              phase->ctrl_or_self(u) == c) {
            wq.push(u);
          }
        }
      }
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

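// Dominance check that also works when both nodes have the same control: in
// that case, prove that there is no path from d to n within that control.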
bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

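// Steps from a memory node to the previous memory state on the given alias
// slice.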
Node* next_mem(Node* mem, int alias) {
  Node* res = nullptr;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

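// Walks the idom chain from c up to dom and checks that the path contains no
// unexpected control flow. Returns NodeSentinel if an unsupported branch is
// found, otherwise the single allowed If projection (or nullptr if there is
// none).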
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == nullptr) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

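// Walks the memory graph backwards from mem until a memory state whose control
// strictly dominates ctrl is found. Returns nullptr if a cycle is encountered.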
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq;
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return nullptr;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

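// Finds the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking up
// the dominator tree and inspecting memory phis at regions and the memory
// projections of calls, safepoints and membars.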
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != nullptr) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == nullptr) {
              mem = projs.fallthrough_memproj;
            } else {
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
              assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
              assert(mem == nullptr, "only one proj");
              mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}

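// Collects the data (non-CFG) uses of n that have control ctrl, except loop
// phis whose backedge input is n, so they can be revisited after the barrier
// has been expanded.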
void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

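// Replaces the outer strip-mined loop head and loop end with a plain
// LoopNode/IfNode pair and clears the strip-mined bit on the inner counted
// loop, so that expanding a barrier inside the loop does not break loop strip
// mining verification.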
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

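// Emits the runtime gc-state test: loads the per-thread gc-state byte through
// raw memory, ANDs it with the given flags and tests the result against zero.
// ctrl becomes the path where some of the flag bits are set, test_fail_ctrl
// the path where none are.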
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread          = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                        TypeInt::BYTE, MemNode::unordered);
  Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);

  IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                  = new IfTrueNode(gc_state_iff);
  test_fail_ctrl        = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff,   loop, old_ctrl);
  phase->register_control(ctrl,           loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread,        old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state,      old_ctrl);
  phase->register_new_node(gc_state_and,  old_ctrl);
  phase->register_new_node(gc_state_cmp,  old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  phase->set_ctrl(gc_state_offset, phase->C->root());

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}

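// If val may be null, emits an explicit null check: ctrl is updated to the
// non-null path, null_ctrl receives the null path. If the type of val already
// excludes null, no test is emitted and null_ctrl is left untouched.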
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);

    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl             = new IfTrueNode(null_iff);
    null_ctrl        = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff,  loop, old_ctrl);
    phase->register_control(ctrl,      loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp,  old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}

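// Emits the in-collection-set test: the object's address is shifted by the
// region size shift to index into the cset fast-test table, and the loaded
// byte is tested against zero. ctrl becomes the in-cset path, not_cset_ctrl
// the not-in-cset path.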
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  Node* raw_val        = new CastP2XNode(old_ctrl, val);
  Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);

  Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                       DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                       TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);

  IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                 = new IfTrueNode(cset_iff);
  not_cset_ctrl        = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff,      loop, old_ctrl);
  phase->register_control(ctrl,          loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  phase->set_ctrl(cset_addr_ptr, phase->C->root());

  phase->register_new_node(raw_val,        old_ctrl);
  phase->register_new_node(cset_idx,       old_ctrl);
  phase->register_new_node(cset_addr,      old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr,  old_ctrl);
  phase->register_new_node(cset_load,      old_ctrl);
  phase->register_new_node(cset_cmp,       old_ctrl);
  phase->register_new_node(cset_bool,      old_ctrl);
}

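// Emits the runtime call for the load reference barrier. The stub
// (strong/weak/phantom, narrow or not) is selected from the access decorators
// and UseCompressedOops. On return, ctrl and val are updated to the call's
// control projection and its result, cast back to the original oop type.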
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
                                               DecoratorSet decorators, PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

  address calladdr = nullptr;
  const char* name = nullptr;
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;
  if (is_strong) {
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      name = "load_reference_barrier_strong_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      name = "load_reference_barrier_strong";
    }
  } else if (is_weak) {
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
      name = "load_reference_barrier_weak_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
      name = "load_reference_barrier_weak";
    }
  } else {
    assert(is_phantom, "only remaining strength");
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
      name = "load_reference_barrier_phantom_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
      name = "load_reference_barrier_phantom";
    }
  }
  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);

  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, phase->C->top());
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

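// After a barrier has been expanded into explicit control flow ending at
// 'region', re-attach the nodes that were control dependent on the barrier's
// original control to 'region', while keeping the incoming raw memory (and
// anything it depends on) above the barrier.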
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !u->depends_only_on_test() && // preserve dependency on test
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region, &phase->igvn());
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

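// When a node had to be cloned for the exception path of a call, build phis at
// the regions between c and ctrl that merge the fall-through and catch-all
// paths, so that each path sees its own copy (n or n_clone).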
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = nullptr;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != nullptr, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

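// Pins each barrier at a control point where it can be expanded: loops that
// are strip mined are un-strip-mined first, barriers controlled by a rethrow
// call are moved above the call, and barriers controlled by other Java calls
// are cloned so the fall-through and exception paths each get their own copy.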
1107 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1108   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1109 
1110   Unique_Node_List uses;
1111   for (int i = 0; i < state->iu_barriers_count(); i++) {
1112     Node* barrier = state->iu_barrier(i);
1113     Node* ctrl = phase->get_ctrl(barrier);
1114     IdealLoopTree* loop = phase->get_loop(ctrl);
1115     Node* head = loop->head();
1116     if (head->is_OuterStripMinedLoop()) {
1117       // Expanding a barrier here will break loop strip mining
1118       // verification. Transform the loop so the loop nest doesn't
1119       // appear as strip mined.
1120       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1121       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1122     }
1123   }
1124 
1125   Node_Stack stack(0);
1126   Node_List clones;
1127   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1128     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1129 
1130     Node* ctrl = phase->get_ctrl(lrb);
1131     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1132 
1133     CallStaticJavaNode* unc = nullptr;
1134     Node* unc_ctrl = nullptr;
1135     Node* uncasted_val = val;
1136 
1137     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1138       Node* u = lrb->fast_out(i);
1139       if (u->Opcode() == Op_CastPP &&
1140           u->in(0) != nullptr &&
1141           phase->is_dominator(u->in(0), ctrl)) {
1142         const Type* u_t = phase->igvn().type(u);
1143 
1144         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1145             u->in(0)->Opcode() == Op_IfTrue &&
1146             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1147             u->in(0)->in(0)->is_If() &&
1148             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1149             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1150             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1151             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1152             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1153           IdealLoopTree* loop = phase->get_loop(ctrl);
1154           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1155 
1156           if (!unc_loop->is_member(loop)) {
1157             continue;
1158           }
1159 
1160           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1161           assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1162           if (branch == NodeSentinel) {
1163             continue;
1164           }
1165 
1166           Node* iff = u->in(0)->in(0);
1167           Node* bol = iff->in(1)->clone();
1168           Node* cmp = bol->in(1)->clone();
1169           cmp->set_req(1, lrb);
1170           bol->set_req(1, cmp);
1171           phase->igvn().replace_input_of(iff, 1, bol);
1172           phase->set_ctrl(lrb, iff->in(0));
1173           phase->register_new_node(cmp, iff->in(0));
1174           phase->register_new_node(bol, iff->in(0));
1175           break;
1176         }
1177       }
1178     }
1179     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1180       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1181       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1182         // The rethrow call may have too many projections to be
1183         // properly handled here. Given there's no reason for a
1184         // barrier to depend on the call, move it above the call
1185         stack.push(lrb, 0);
1186         do {
1187           Node* n = stack.node();
1188           uint idx = stack.index();
1189           if (idx < n->req()) {
1190             Node* in = n->in(idx);
1191             stack.set_index(idx+1);
1192             if (in != nullptr) {
1193               if (phase->has_ctrl(in)) {
1194                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1195 #ifdef ASSERT
1196                   for (uint i = 0; i < stack.size(); i++) {
1197                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1198                   }
1199 #endif
1200                   stack.push(in, 0);
1201                 }
1202               } else {
1203                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1204               }
1205             }
1206           } else {
1207             phase->set_ctrl(n, call->in(0));
1208             stack.pop();
1209           }
1210         } while(stack.size() > 0);
1211         continue;
1212       }
1213       CallProjections projs;
1214       call->extract_projections(&projs, false, false);
1215 
1216 #ifdef ASSERT
1217       VectorSet cloned;
1218 #endif
1219       Node* lrb_clone = lrb->clone();
1220       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1221       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1222 
1223       stack.push(lrb, 0);
1224       clones.push(lrb_clone);
1225 
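           // Walk the data uses of the barrier: uses between the call and the catch are cloned
           // for the exception path; other uses are rewired to the clone or to phis that merge
           // both paths at the call return.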
1226       do {
1227         assert(stack.size() == clones.size(), "");
1228         Node* n = stack.node();
1229 #ifdef ASSERT
1230         if (n->is_Load()) {
1231           Node* mem = n->in(MemNode::Memory);
1232           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1233             Node* u = mem->fast_out(j);
1234             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1235           }
1236         }
1237 #endif
1238         uint idx = stack.index();
1239         Node* n_clone = clones.at(clones.size()-1);
1240         if (idx < n->outcnt()) {
1241           Node* u = n->raw_out(idx);
1242           Node* c = phase->ctrl_or_self(u);
1243           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1244             stack.set_index(idx+1);
1245             assert(!u->is_CFG(), "");
1246             stack.push(u, 0);
1247             assert(!cloned.test_set(u->_idx), "only one clone");
1248             Node* u_clone = u->clone();
1249             int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1250             assert(nb > 0, "should have replaced some uses");
1251             phase->register_new_node(u_clone, projs.catchall_catchproj);
1252             clones.push(u_clone);
1253             phase->set_ctrl(u, projs.fallthrough_catchproj);
1254           } else {
1255             bool replaced = false;
1256             if (u->is_Phi()) {
1257               for (uint k = 1; k < u->req(); k++) {
1258                 if (u->in(k) == n) {
1259                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1260                     phase->igvn().replace_input_of(u, k, n_clone);
1261                     replaced = true;
1262                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1263                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1264                     replaced = true;
1265                   }
1266                 }
1267               }
1268             } else {
1269               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1270                 phase->igvn().rehash_node_delayed(u);
1271                 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1272                 assert(nb > 0, "should have replaced some uses");
1273                 replaced = true;
1274               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1275                 if (u->is_If()) {
1276                   // Can't break If/Bool/Cmp chain
1277                   assert(n->is_Bool(), "unexpected If shape");
1278                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1279                   assert(n_clone->is_Bool(), "unexpected clone");
1280                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1281                   Node* bol_clone = n->clone();
1282                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1283                   bol_clone->set_req(1, cmp_clone);
1284 
1285                   Node* nn = stack.node_at(stack.size()-3);
1286                   Node* nn_clone = clones.at(clones.size()-3);
1287                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1288 
1289                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1290                                                    &phase->igvn());
1291                   assert(nb > 0, "should have replaced some uses");
1292 
1293                   phase->register_new_node(bol_clone, u->in(0));
1294                   phase->register_new_node(cmp_clone, u->in(0));
1295 
1296                   phase->igvn().replace_input_of(u, 1, bol_clone);
1297 
1298                 } else {
1299                   phase->igvn().rehash_node_delayed(u);
1300                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1301                   assert(nb > 0, "should have replaced some uses");
1302                 }
1303                 replaced = true;
1304               }
1305             }
1306             if (!replaced) {
1307               stack.set_index(idx+1);
1308             }
1309           }
1310         } else {
1311           stack.pop();
1312           clones.pop();
1313         }
1314       } while (stack.size() > 0);
1315       assert(stack.size() == 0 && clones.size() == 0, "");
1316     }
1317   }
1318 
1319   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1320     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1321     Node* ctrl = phase->get_ctrl(lrb);
1322     IdealLoopTree* loop = phase->get_loop(ctrl);
1323     Node* head = loop->head();
1324     if (head->is_OuterStripMinedLoop()) {
1325       // Expanding a barrier here will break loop strip mining
1326       // verification. Transform the loop so the loop nest doesn't
1327       // appear as strip mined.
1328       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1329       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1330     }
1331   }
1332 
1333   // Expand load-reference-barriers
1334   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1335   Unique_Node_List uses_to_ignore;
1336   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1337     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1338     uint last = phase->C->unique();
1339     Node* ctrl = phase->get_ctrl(lrb);
1340     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1341 
1342     Node* orig_ctrl = ctrl;
1343 
1344     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1345     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1346 
1347     IdealLoopTree *loop = phase->get_loop(ctrl);
1348 
1349     Node* heap_stable_ctrl = nullptr;
1350     Node* null_ctrl = nullptr;
1351 
1352     assert(val->bottom_type()->make_oopptr(), "need oop");
1353     assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1354 
1355     enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1356     Node* region = new RegionNode(PATH_LIMIT);
1357     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1358 
1359     // Stable path.
1360     int flags = ShenandoahHeap::HAS_FORWARDED;
1361     if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1362       flags |= ShenandoahHeap::WEAK_ROOTS;
1363     }
1364     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1365     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1366 
1367     // Heap stable case
1368     region->init_req(_heap_stable, heap_stable_ctrl);
1369     val_phi->init_req(_heap_stable, val);
1370 
1371     // Test for in-cset, unless this is a non-strong (e.g. weak) LRB. Such LRBs need to return null
1372     // even for non-cset objects, to prevent resurrection of such objects.
1373     // Wires !in_cset(obj) to the _not_cset slot of the region and phi.
1374     Node* not_cset_ctrl = nullptr;
1375     if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1376       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1377     }
1378     if (not_cset_ctrl != nullptr) {
1379       region->init_req(_not_cset, not_cset_ctrl);
1380       val_phi->init_req(_not_cset, val);
1381     } else {
1382       region->del_req(_not_cset);
1383       val_phi->del_req(_not_cset);
1384     }
1385 
1386     // Evacuation path: the original value is in the collection set, so call the
1387     // LRB stub, which returns the updated value.
1388 
1389     // Wire the stub path into the _evac_path slot of the region and phi.
1390     Node* result_mem = nullptr;
1391 
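         // Find the address the value was loaded from (if any) and rebuild it on a
         // control-dependent CheckCastPP of its base, so the address passed to the stub
         // cannot float above the barrier's control flow.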
1392     Node* addr;
1393     {
1394       VectorSet visited;
1395       addr = get_load_addr(phase, visited, lrb);
1396     }
1397     if (addr->Opcode() == Op_AddP) {
1398       Node* orig_base = addr->in(AddPNode::Base);
1399       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1400       phase->register_new_node(base, ctrl);
1401       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1402         // Field access
1403         addr = addr->clone();
1404         addr->set_req(AddPNode::Base, base);
1405         addr->set_req(AddPNode::Address, base);
1406         phase->register_new_node(addr, ctrl);
1407       } else {
1408         Node* addr2 = addr->in(AddPNode::Address);
1409         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1410               addr2->in(AddPNode::Base) == orig_base) {
1411           addr2 = addr2->clone();
1412           addr2->set_req(AddPNode::Base, base);
1413           addr2->set_req(AddPNode::Address, base);
1414           phase->register_new_node(addr2, ctrl);
1415           addr = addr->clone();
1416           addr->set_req(AddPNode::Base, base);
1417           addr->set_req(AddPNode::Address, addr2);
1418           phase->register_new_node(addr, ctrl);
1419         }
1420       }
1421     }
1422     call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1423     region->init_req(_evac_path, ctrl);
1424     val_phi->init_req(_evac_path, val);
1425 
1426     phase->register_control(region, loop, heap_stable_iff);
1427     Node* out_val = val_phi;
1428     phase->register_new_node(val_phi, region);
1429 
1430     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1431 
1432     ctrl = orig_ctrl;
1433 
1434     phase->igvn().replace_node(lrb, out_val);
1435 
1436     follow_barrier_uses(out_val, ctrl, uses, phase);
1437 
1438     for (uint next = 0; next < uses.size(); next++) {
1439       Node* n = uses.at(next);
1440       assert(phase->get_ctrl(n) == ctrl, "bad control");
1441       assert(n != raw_mem, "should leave input raw mem above the barrier");
1442       phase->set_ctrl(n, region);
1443       follow_barrier_uses(n, ctrl, uses, phase);
1444     }
1445     fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1446   }
1447   // Done expanding load-reference-barriers.
1448   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1449 
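       // Expand IU (SATB pre-write) barriers: when marking is active and pre_val is not null,
       // record pre_val in the thread-local SATB queue, or call the runtime slow path when
       // the queue is full.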
1450   for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
1451     Node* barrier = state->iu_barrier(i);
1452     Node* pre_val = barrier->in(1);
1453 
1454     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1455       ShouldNotReachHere();
1456       continue;
1457     }
1458 
1459     Node* ctrl = phase->get_ctrl(barrier);
1460 
1461     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1462       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1463       ctrl = ctrl->in(0)->in(0);
1464       phase->set_ctrl(barrier, ctrl);
1465     } else if (ctrl->is_CallRuntime()) {
1466       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1467       ctrl = ctrl->in(0);
1468       phase->set_ctrl(barrier, ctrl);
1469     }
1470 
1471     Node* init_ctrl = ctrl;
1472     IdealLoopTree* loop = phase->get_loop(ctrl);
1473     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1474     Node* init_raw_mem = raw_mem;
1475     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1476     Node* heap_stable_ctrl = nullptr;
1477     Node* null_ctrl = nullptr;
1478     uint last = phase->C->unique();
1479 
1480     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1481     Node* region = new RegionNode(PATH_LIMIT);
1482     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1483 
1484     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1485     Node* region2 = new RegionNode(PATH_LIMIT2);
1486     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1487 
1488     // Stable path.
1489     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1490     region->init_req(_heap_stable, heap_stable_ctrl);
1491     phi->init_req(_heap_stable, raw_mem);
1492 
1493     // Null path
1494     Node* reg2_ctrl = nullptr;
1495     test_null(ctrl, pre_val, null_ctrl, phase);
1496     if (null_ctrl != nullptr) {
1497       reg2_ctrl = null_ctrl->in(0);
1498       region2->init_req(_null_path, null_ctrl);
1499       phi2->init_req(_null_path, raw_mem);
1500     } else {
1501       region2->del_req(_null_path);
1502       phi2->del_req(_null_path);
1503     }
1504 
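         // Fast path: if the SATB queue index is non-zero (queue not full), store pre_val
         // at buffer + (index - sizeof(intptr_t)) and write back the decremented index.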
1505     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1506     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1507     Node* thread = new ThreadLocalNode();
1508     phase->register_new_node(thread, ctrl);
1509     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1510     phase->register_new_node(buffer_adr, ctrl);
1511     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1512     phase->register_new_node(index_adr, ctrl);
1513 
1514     BasicType index_bt = TypeX_X->basic_type();
1515     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1516     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1517     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1518     phase->register_new_node(index, ctrl);
1519     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1520     phase->register_new_node(index_cmp, ctrl);
1521     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1522     phase->register_new_node(index_test, ctrl);
1523     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1524     if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
1525     phase->register_control(queue_full_iff, loop, ctrl);
1526     Node* not_full = new IfTrueNode(queue_full_iff);
1527     phase->register_control(not_full, loop, queue_full_iff);
1528     Node* full = new IfFalseNode(queue_full_iff);
1529     phase->register_control(full, loop, queue_full_iff);
1530 
1531     ctrl = not_full;
1532 
1533     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1534     phase->register_new_node(next_index, ctrl);
1535 
1536     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1537     phase->register_new_node(buffer, ctrl);
1538     Node* log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1539     phase->register_new_node(log_addr, ctrl);
1540     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1541     phase->register_new_node(log_store, ctrl);
1542     // Update the SATB queue index.
1543     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1544     phase->register_new_node(index_update, ctrl);
1545 
1546     // Fast-path case
1547     region2->init_req(_fast_path, ctrl);
1548     phi2->init_req(_fast_path, index_update);
1549 
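         // Slow path: the queue is full; call the runtime pre-write entry with pre_val
         // and the current thread.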
1550     ctrl = full;
1551 
1552     Node* base = find_bottom_mem(ctrl, phase);
1553 
1554     MergeMemNode* mm = MergeMemNode::make(base);
1555     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1556     phase->register_new_node(mm, ctrl);
1557 
1558     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1559     call->init_req(TypeFunc::Control, ctrl);
1560     call->init_req(TypeFunc::I_O, phase->C->top());
1561     call->init_req(TypeFunc::Memory, mm);
1562     call->init_req(TypeFunc::FramePtr, phase->C->top());
1563     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1564     call->init_req(TypeFunc::Parms, pre_val);
1565     call->init_req(TypeFunc::Parms+1, thread);
1566     phase->register_control(call, loop, ctrl);
1567 
1568     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1569     phase->register_control(ctrl_proj, loop, call);
1570     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1571     phase->register_new_node(mem_proj, call);
1572 
1573     // Slow-path case
1574     region2->init_req(_slow_path, ctrl_proj);
1575     phi2->init_req(_slow_path, mem_proj);
1576 
1577     phase->register_control(region2, loop, reg2_ctrl);
1578     phase->register_new_node(phi2, region2);
1579 
1580     region->init_req(_heap_unstable, region2);
1581     phi->init_req(_heap_unstable, phi2);
1582 
1583     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1584     phase->register_new_node(phi, region);
1585 
1586     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1587     for (uint next = 0; next < uses.size(); next++) {
1588       Node* n = uses.at(next);
1589       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1590       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1591       phase->set_ctrl(n, region);
1592       follow_barrier_uses(n, init_ctrl, uses, phase);
1593     }
1594     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1595 
1596     phase->igvn().replace_node(barrier, pre_val);
1597   }
1598   assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1599 
1600 }
1601 
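     // Walk back from a barrier input to the load that produced the value (if any) and return
     // that load's address. Returns the zero (null) constant when the address is unknown or
     // ambiguous; nullptr is only used internally to break cycles.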
1602 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1603   if (visited.test_set(in->_idx)) {
1604     return nullptr;
1605   }
1606   switch (in->Opcode()) {
1607     case Op_Proj:
1608       return get_load_addr(phase, visited, in->in(0));
1609     case Op_CastPP:
1610     case Op_CheckCastPP:
1611     case Op_DecodeN:
1612     case Op_EncodeP:
1613       return get_load_addr(phase, visited, in->in(1));
1614     case Op_LoadN:
1615     case Op_LoadP:
1616       return in->in(MemNode::Address);
1617     case Op_CompareAndExchangeN:
1618     case Op_CompareAndExchangeP:
1619     case Op_GetAndSetN:
1620     case Op_GetAndSetP:
1621     case Op_ShenandoahCompareAndExchangeP:
1622     case Op_ShenandoahCompareAndExchangeN:
1623       // These instructions would just have stored a different value
1624       // into the field; there is no point trying to recover the load address here.
1625       return phase->igvn().zerocon(T_OBJECT);
1626     case Op_CMoveP:
1627     case Op_CMoveN: {
1628       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1629       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1630       // Unambiguous cases: only one branch reports an address, or both report the same one.
1631       if (t != nullptr && f == nullptr) return t;
1632       if (t == nullptr && f != nullptr) return f;
1633       if (t != nullptr && t == f) return t;
1634       // Ambiguity.
1635       return phase->igvn().zerocon(T_OBJECT);
1636     }
1637     case Op_Phi: {
1638       Node* addr = nullptr;
1639       for (uint i = 1; i < in->req(); i++) {
1640         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1641         if (addr == nullptr) {
1642           addr = addr1;
1643         }
1644         if (addr != addr1) {
1645           return phase->igvn().zerocon(T_OBJECT);
1646         }
1647       }
1648       return addr;
1649     }
1650     case Op_ShenandoahLoadReferenceBarrier:
1651       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1652     case Op_ShenandoahIUBarrier:
1653       return get_load_addr(phase, visited, in->in(1));
1654     case Op_CallDynamicJava:
1655     case Op_CallLeaf:
1656     case Op_CallStaticJava:
1657     case Op_ConN:
1658     case Op_ConP:
1659     case Op_Parm:
1660     case Op_CreateEx:
1661       return phase->igvn().zerocon(T_OBJECT);
1662     default:
1663 #ifdef ASSERT
1664       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1665 #endif
1666       return phase->igvn().zerocon(T_OBJECT);
1667   }
1668 
1669 }
1670 
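     // The gc-state test is loop invariant: clone its load/and/cmp/bool chain above the loop
     // entry (using a dominating memory state) so the loop can later be unswitched on it.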
1671 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1672   IdealLoopTree *loop = phase->get_loop(iff);
1673   Node* loop_head = loop->_head;
1674   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1675 
1676   Node* bol = iff->in(1);
1677   Node* cmp = bol->in(1);
1678   Node* andi = cmp->in(1);
1679   Node* load = andi->in(1);
1680 
1681   assert(is_gc_state_load(load), "broken");
1682   if (!phase->is_dominator(load->in(0), entry_c)) {
1683     Node* mem_ctrl = nullptr;
1684     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1685     load = load->clone();
1686     load->set_req(MemNode::Memory, mem);
1687     load->set_req(0, entry_c);
1688     phase->register_new_node(load, entry_c);
1689     andi = andi->clone();
1690     andi->set_req(1, load);
1691     phase->register_new_node(andi, entry_c);
1692     cmp = cmp->clone();
1693     cmp->set_req(1, andi);
1694     phase->register_new_node(cmp, entry_c);
1695     bol = bol->clone();
1696     bol->set_req(1, cmp);
1697     phase->register_new_node(bol, entry_c);
1698 
1699     phase->igvn().replace_input_of(iff, 1, bol);
1700   }
1701 }
1702 
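     // Returns true if n is a heap-stable test whose region is directly dominated by another
     // heap-stable test, and every path into the region comes through one of the dominating
     // if's projections.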
1703 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1704   if (!n->is_If() || n->is_CountedLoopEnd()) {
1705     return false;
1706   }
1707   Node* region = n->in(0);
1708 
1709   if (!region->is_Region()) {
1710     return false;
1711   }
1712   Node* dom = phase->idom(region);
1713   if (!dom->is_If()) {
1714     return false;
1715   }
1716 
1717   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1718     return false;
1719   }
1720 
1721   IfNode* dom_if = dom->as_If();
1722   Node* proj_true = dom_if->proj_out(1);
1723   Node* proj_false = dom_if->proj_out(0);
1724 
1725   for (uint i = 1; i < region->req(); i++) {
1726     if (phase->is_dominator(proj_true, region->in(i))) {
1727       continue;
1728     }
1729     if (phase->is_dominator(proj_false, region->in(i))) {
1730       continue;
1731     }
1732     return false;
1733   }
1734 
1735   return true;
1736 }
1737 
1738 bool ShenandoahBarrierC2Support::merge_point_safe(Node* region) {
1739   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1740     Node* n = region->fast_out(i);
1741     if (n->is_LoadStore()) {
1742       // Splitting a LoadStore node through a phi causes it to lose its SCMemProj: the split-if code doesn't support
1743       // a LoadStore at the region the if is split through, because that's not expected to happen (LoadStore nodes
1744       // should be between barrier nodes). It does happen with Shenandoah, however, because barriers can get
1745       // expanded around a LoadStore node.
1746       return false;
1747     }
1748   }
1749   return true;
1750 }
1751 
1752 
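     // Merge a heap-stable test with an identical dominating test: feed the dominated if a phi
     // of constants determined by the dominating test's projections, then split the if so it
     // constant-folds on each incoming path.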
1753 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1754   assert(is_heap_stable_test(n), "no other tests");
1755   if (identical_backtoback_ifs(n, phase)) {
1756     Node* n_ctrl = n->in(0);
1757     if (phase->can_split_if(n_ctrl) && merge_point_safe(n_ctrl)) {
1758       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1759       if (is_heap_stable_test(n)) {
1760         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1761         assert(is_gc_state_load(gc_state_load), "broken");
1762         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1763         assert(is_gc_state_load(dom_gc_state_load), "broken");
1764         if (gc_state_load != dom_gc_state_load) {
1765           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1766         }
1767       }
1768       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1769       Node* proj_true = dom_if->proj_out(1);
1770       Node* proj_false = dom_if->proj_out(0);
1771       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1772       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1773 
1774       for (uint i = 1; i < n_ctrl->req(); i++) {
1775         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1776           bolphi->init_req(i, con_true);
1777         } else {
1778           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1779           bolphi->init_req(i, con_false);
1780         }
1781       }
1782       phase->register_new_node(bolphi, n_ctrl);
1783       phase->igvn().replace_input_of(n, 1, bolphi);
1784       phase->do_split_if(n);
1785     }
1786   }
1787 }
1788 
1789 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1790   // Find first invariant test that doesn't exit the loop
1791   LoopNode *head = loop->_head->as_Loop();
1792   IfNode* unswitch_iff = nullptr;
1793   Node* n = head->in(LoopNode::LoopBackControl);
1794   int loop_has_sfpts = -1;
1795   while (n != head) {
1796     Node* n_dom = phase->idom(n);
1797     if (n->is_Region()) {
1798       if (n_dom->is_If()) {
1799         IfNode* iff = n_dom->as_If();
1800         if (iff->in(1)->is_Bool()) {
1801           BoolNode* bol = iff->in(1)->as_Bool();
1802           if (bol->in(1)->is_Cmp()) {
1803             // If the condition is invariant and not a loop exit,
1804             // we have found a candidate for unswitching.
1805             if (is_heap_stable_test(iff) &&
1806                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1807               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1808               if (loop_has_sfpts == -1) {
1809                 for (uint i = 0; i < loop->_body.size(); i++) {
1810                   Node* m = loop->_body[i];
1811                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1812                     loop_has_sfpts = 1;
1813                     break;
1814                   }
1815                 }
1816                 if (loop_has_sfpts == -1) {
1817                   loop_has_sfpts = 0;
1818                 }
1819               }
1820               if (!loop_has_sfpts) {
1821                 unswitch_iff = iff;
1822               }
1823             }
1824           }
1825         }
1826       }
1827     }
1828     n = n_dom;
1829   }
1830   return unswitch_iff;
1831 }
1832 
1833 
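     // After barrier expansion: collect all heap-stable tests, merge back-to-back ones, and
     // unswitch innermost loops on the remaining loop-invariant heap-stable tests where the
     // loop policy allows it.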
1834 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1835   Node_List heap_stable_tests;
1836   stack.push(phase->C->start(), 0);
1837   do {
1838     Node* n = stack.node();
1839     uint i = stack.index();
1840 
1841     if (i < n->outcnt()) {
1842       Node* u = n->raw_out(i);
1843       stack.set_index(i+1);
1844       if (!visited.test_set(u->_idx)) {
1845         stack.push(u, 0);
1846       }
1847     } else {
1848       stack.pop();
1849       if (n->is_If() && is_heap_stable_test(n)) {
1850         heap_stable_tests.push(n);
1851       }
1852     }
1853   } while (stack.size() > 0);
1854 
1855   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1856     Node* n = heap_stable_tests.at(i);
1857     assert(is_heap_stable_test(n), "only evacuation test");
1858     merge_back_to_back_tests(n, phase);
1859   }
1860 
1861   if (!phase->C->major_progress()) {
1862     VectorSet seen;
1863     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1864       Node* n = heap_stable_tests.at(i);
1865       IdealLoopTree* loop = phase->get_loop(n);
1866       if (loop != phase->ltree_root() &&
1867           loop->_child == nullptr &&
1868           !loop->_irreducible) {
1869         Node* head = loop->_head;
1870         if (head->is_Loop() &&
1871             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1872             !seen.test_set(head->_idx)) {
1873           IfNode* iff = find_unswitching_candidate(loop, phase);
1874           if (iff != nullptr) {
1875             Node* bol = iff->in(1);
1876             if (head->as_Loop()->is_strip_mined()) {
1877               head->as_Loop()->verify_strip_mined(0);
1878             }
1879             move_gc_state_test_out_of_loop(iff, phase);
1880 
1881             AutoNodeBudget node_budget(phase);
1882 
1883             if (loop->policy_unswitching(phase)) {
1884               if (head->as_Loop()->is_strip_mined()) {
1885                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1886                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1887               }
1888               phase->do_unswitching(loop, old_new);
1889             } else {
1890               // Not proceeding with unswitching. Move the load back
1891               // into the loop.
1892               phase->igvn().replace_input_of(iff, 1, bol);
1893             }
1894           }
1895         }
1896       }
1897     }
1898   }
1899 }
1900 
1901 ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) {
1902   ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
1903 }
1904 
1905 const Type* ShenandoahIUBarrierNode::bottom_type() const {
1906   if (in(1) == nullptr || in(1)->is_top()) {
1907     return Type::TOP;
1908   }
1909   const Type* t = in(1)->bottom_type();
1910   if (t == TypePtr::NULL_PTR) {
1911     return t;
1912   }
1913   return t->is_oopptr();
1914 }
1915 
1916 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
1917   if (in(1) == nullptr) {
1918     return Type::TOP;
1919   }
1920   const Type* t = phase->type(in(1));
1921   if (t == Type::TOP) {
1922     return Type::TOP;
1923   }
1924   if (t == TypePtr::NULL_PTR) {
1925     return t;
1926   }
1927   return t->is_oopptr();
1928 }
1929 
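     // Classify whether a stored value needs an IU barrier: nulls, constant oops, freshly
     // allocated objects and values already passed through an IU barrier do not; phis and
     // cmoves require looking at their inputs.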
1930 int ShenandoahIUBarrierNode::needed(Node* n) {
1931   if (n == nullptr ||
1932       n->is_Allocate() ||
1933       n->Opcode() == Op_ShenandoahIUBarrier ||
1934       n->bottom_type() == TypePtr::NULL_PTR ||
1935       (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) {
1936     return NotNeeded;
1937   }
1938   if (n->is_Phi() ||
1939       n->is_CMove()) {
1940     return MaybeNeeded;
1941   }
1942   return Needed;
1943 }
1944 
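     // Skip over constraint casts, DecodeN/EncodeP and projections to the node that actually
     // produces the value; stop early at nulls and constant oops.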
1945 Node* ShenandoahIUBarrierNode::next(Node* n) {
1946   for (;;) {
1947     if (n == nullptr) {
1948       return n;
1949     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
1950       return n;
1951     } else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
1952       return n;
1953     } else if (n->is_ConstraintCast() ||
1954                n->Opcode() == Op_DecodeN ||
1955                n->Opcode() == Op_EncodeP) {
1956       n = n->in(1);
1957     } else if (n->is_Proj()) {
1958       n = n->in(0);
1959     } else {
1960       return n;
1961     }
1962   }
1963   ShouldNotReachHere();
1964   return nullptr;
1965 }
1966 
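     // The barrier is redundant (and replaced by its input) if every value reachable through
     // in(1), walking through phis and cmoves, is known not to need an IU barrier.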
1967 Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
1968   PhaseIterGVN* igvn = phase->is_IterGVN();
1969 
1970   Node* n = next(in(1));
1971 
1972   int cont = needed(n);
1973 
1974   if (cont == NotNeeded) {
1975     return in(1);
1976   } else if (cont == MaybeNeeded) {
1977     if (igvn == nullptr) {
1978       phase->record_for_igvn(this);
1979       return this;
1980     } else {
1981       ResourceMark rm;
1982       Unique_Node_List wq;
1983       uint wq_i = 0;
1984 
1985       for (;;) {
1986         if (n->is_Phi()) {
1987           for (uint i = 1; i < n->req(); i++) {
1988             Node* m = n->in(i);
1989             if (m != nullptr) {
1990               wq.push(m);
1991             }
1992           }
1993         } else {
1994           assert(n->is_CMove(), "nothing else here");
1995           Node* m = n->in(CMoveNode::IfFalse);
1996           wq.push(m);
1997           m = n->in(CMoveNode::IfTrue);
1998           wq.push(m);
1999         }
2000         Node* orig_n = nullptr;
2001         do {
2002           if (wq_i >= wq.size()) {
2003             return in(1);
2004           }
2005           n = wq.at(wq_i);
2006           wq_i++;
2007           orig_n = n;
2008           n = next(n);
2009           cont = needed(n);
2010           if (cont == Needed) {
2011             return this;
2012           }
2013         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2014       }
2015     }
2016   }
2017 
2018   return this;
2019 }
2020 
2021 #ifdef ASSERT
2022 static bool has_never_branch(Node* root) {
2023   for (uint i = 1; i < root->req(); i++) {
2024     Node* in = root->in(i);
2025     if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
2026       return true;
2027     }
2028   }
2029   return false;
2030 }
2031 #endif
2032 
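     // Build a map from CFG node to the memory state for this fixer's alias class, so the
     // memory state at an arbitrary control point can be looked up during barrier expansion.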
2033 void MemoryGraphFixer::collect_memory_nodes() {
2034   Node_Stack stack(0);
2035   VectorSet visited;
2036   Node_List regions;
2037 
2038   // Walk the raw memory graph and create a mapping from CFG node to
2039   // memory node. Exclude phis for now.
2040   stack.push(_phase->C->root(), 1);
2041   do {
2042     Node* n = stack.node();
2043     int opc = n->Opcode();
2044     uint i = stack.index();
2045     if (i < n->req()) {
2046       Node* mem = nullptr;
2047       if (opc == Op_Root) {
2048         Node* in = n->in(i);
2049         int in_opc = in->Opcode();
2050         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2051           mem = in->in(TypeFunc::Memory);
2052         } else if (in_opc == Op_Halt) {
2053           if (in->in(0)->is_Region()) {
2054             Node* r = in->in(0);
2055             for (uint j = 1; j < r->req(); j++) {
2056               assert(!r->in(j)->is_NeverBranch(), "");
2057             }
2058           } else {
2059             Node* proj = in->in(0);
2060             assert(proj->is_Proj(), "");
2061             Node* in = proj->in(0);
2062             assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2063             if (in->is_CallStaticJava()) {
2064               mem = in->in(TypeFunc::Memory);
2065             } else if (in->Opcode() == Op_Catch) {
2066               Node* call = in->in(0)->in(0);
2067               assert(call->is_Call(), "");
2068               mem = call->in(TypeFunc::Memory);
2069             } else if (in->is_NeverBranch()) {
2070               mem = collect_memory_for_infinite_loop(in);
2071             }
2072           }
2073         } else {
2074 #ifdef ASSERT
2075           n->dump();
2076           in->dump();
2077 #endif
2078           ShouldNotReachHere();
2079         }
2080       } else {
2081         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2082         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2083         mem = n->in(i);
2084       }
2085       i++;
2086       stack.set_index(i);
2087       if (mem == nullptr) {
2088         continue;
2089       }
2090       for (;;) {
2091         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2092           break;
2093         }
2094         if (mem->is_Phi()) {
2095           stack.push(mem, 2);
2096           mem = mem->in(1);
2097         } else if (mem->is_Proj()) {
2098           stack.push(mem, mem->req());
2099           mem = mem->in(0);
2100         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2101           mem = mem->in(TypeFunc::Memory);
2102         } else if (mem->is_MergeMem()) {
2103           MergeMemNode* mm = mem->as_MergeMem();
2104           mem = mm->memory_at(_alias);
2105         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2106           assert(_alias == Compile::AliasIdxRaw, "");
2107           stack.push(mem, mem->req());
2108           mem = mem->in(MemNode::Memory);
2109         } else {
2110 #ifdef ASSERT
2111           mem->dump();
2112 #endif
2113           ShouldNotReachHere();
2114         }
2115       }
2116     } else {
2117       if (n->is_Phi()) {
2118         // Memory phis are handled when memory state is propagated to regions below.
2119       } else if (!n->is_Root()) {
2120         Node* c = get_ctrl(n);
2121         _memory_nodes.map(c->_idx, n);
2122       }
2123       stack.pop();
2124     }
2125   } while (stack.is_nonempty());
2126 
2127   // Iterate over CFG nodes in RPO and propagate the memory state forward,
2128   // computing the state at regions and creating new phis where needed.
2129   Node_List rpo_list;
2130   visited.clear();
2131   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2132   Node* root = rpo_list.pop();
2133   assert(root == _phase->C->root(), "");
2134 
2135   const bool trace = false;
2136 #ifdef ASSERT
2137   if (trace) {
2138     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2139       Node* c = rpo_list.at(i);
2140       if (_memory_nodes[c->_idx] != nullptr) {
2141         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2142       }
2143     }
2144   }
2145 #endif
2146   uint last = _phase->C->unique();
2147 
2148 #ifdef ASSERT
2149   uint16_t max_depth = 0;
2150   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2151     IdealLoopTree* lpt = iter.current();
2152     max_depth = MAX2(max_depth, lpt->_nest);
2153   }
2154 #endif
2155 
2156   bool progress = true;
2157   int iteration = 0;
2158   Node_List dead_phis;
2159   while (progress) {
2160     progress = false;
2161     iteration++;
2162     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2163     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2164 
2165     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2166       Node* c = rpo_list.at(i);
2167 
2168       Node* prev_mem = _memory_nodes[c->_idx];
2169       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2170         Node* prev_region = regions[c->_idx];
2171         Node* unique = nullptr;
2172         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2173           Node* m = _memory_nodes[c->in(j)->_idx];
2174           assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2175           if (m != nullptr) {
2176             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2177               assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2178               // continue
2179             } else if (unique == nullptr) {
2180               unique = m;
2181             } else if (m == unique) {
2182               // continue
2183             } else {
2184               unique = NodeSentinel;
2185             }
2186           }
2187         }
2188         assert(unique != nullptr, "empty phi???");
2189         if (unique != NodeSentinel) {
2190           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
2191             dead_phis.push(prev_region);
2192           }
2193           regions.map(c->_idx, unique);
2194         } else {
2195           Node* phi = nullptr;
2196           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2197             phi = prev_region;
2198             for (uint k = 1; k < c->req(); k++) {
2199               Node* m = _memory_nodes[c->in(k)->_idx];
2200               assert(m != nullptr, "expect memory state");
2201               phi->set_req(k, m);
2202             }
2203           } else {
2204             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
2205               Node* u = c->fast_out(j);
2206               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2207                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2208                 phi = u;
2209                 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
2210                   Node* m = _memory_nodes[c->in(k)->_idx];
2211                   assert(m != nullptr, "expect memory state");
2212                   if (u->in(k) != m) {
2213                     phi = NodeSentinel;
2214                   }
2215                 }
2216               }
2217             }
2218             if (phi == NodeSentinel) {
2219               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2220               for (uint k = 1; k < c->req(); k++) {
2221                 Node* m = _memory_nodes[c->in(k)->_idx];
2222                 assert(m != nullptr, "expect memory state");
2223                 phi->init_req(k, m);
2224               }
2225             }
2226           }
2227           if (phi != nullptr) {
2228             regions.map(c->_idx, phi);
2229           } else {
2230             assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2231           }
2232         }
2233         Node* current_region = regions[c->_idx];
2234         if (current_region != prev_region) {
2235           progress = true;
2236           if (prev_region == prev_mem) {
2237             _memory_nodes.map(c->_idx, current_region);
2238           }
2239         }
2240       } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2241         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2242         assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
2243         if (m != prev_mem) {
2244           _memory_nodes.map(c->_idx, m);
2245           progress = true;
2246         }
2247       }
2248 #ifdef ASSERT
2249       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2250 #endif
2251     }
2252   }
2253 
2254   // Replace existing phi with computed memory state for that region
2255   // if different (could be a new phi or a dominating memory node if
2256   // that phi was found to be useless).
2257   while (dead_phis.size() > 0) {
2258     Node* n = dead_phis.pop();
2259     n->replace_by(_phase->C->top());
2260     n->destruct(&_phase->igvn());
2261   }
2262   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2263     Node* c = rpo_list.at(i);
2264     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2265       Node* n = regions[c->_idx];
2266       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2267       if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2268         _phase->register_new_node(n, c);
2269       }
2270     }
2271   }
2272   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2273     Node* c = rpo_list.at(i);
2274     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2275       Node* n = regions[c->_idx];
2276       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2277       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2278         Node* u = c->fast_out(i);
2279         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2280             u != n) {
2281           assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
2282           if (u->adr_type() == TypePtr::BOTTOM) {
2283             fix_memory_uses(u, n, n, c);
2284           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2285             _phase->lazy_replace(u, n);
2286             --i; --imax;
2287           }
2288         }
2289       }
2290     }
2291   }
2292 }
2293 
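     // Memory state feeding an infinite loop (NeverBranch): use a memory phi at the loop head
     // if one exists for this alias; otherwise walk the loop to find the memory state at a
     // safepoint inside it.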
2294 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
2295   Node* mem = nullptr;
2296   Node* head = in->in(0);
2297   assert(head->is_Region(), "unexpected infinite loop graph shape");
2298 
2299   Node* phi_mem = nullptr;
2300   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2301     Node* u = head->fast_out(j);
2302     if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2303       if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2304         assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2305         phi_mem = u;
2306       } else if (u->adr_type() == TypePtr::BOTTOM) {
2307         assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2308         if (phi_mem == nullptr) {
2309           phi_mem = u;
2310         }
2311       }
2312     }
2313   }
2314   if (phi_mem == nullptr) {
2315     ResourceMark rm;
2316     Node_Stack stack(0);
2317     stack.push(head, 1);
2318     do {
2319       Node* n = stack.node();
2320       uint i = stack.index();
2321       if (i >= n->req()) {
2322         stack.pop();
2323       } else {
2324         stack.set_index(i + 1);
2325         Node* c = n->in(i);
2326         assert(c != head, "should have found a safepoint on the way");
2327         if (stack.size() != 1 || _phase->is_dominator(head, c)) {
2328           for (;;) {
2329             if (c->is_Region()) {
2330               stack.push(c, 1);
2331               break;
2332             } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
2333               Node* m = c->in(TypeFunc::Memory);
2334               if (m->is_MergeMem()) {
2335                 m = m->as_MergeMem()->memory_at(_alias);
2336               }
2337               assert(mem == nullptr || mem == m, "several memory states");
2338               mem = m;
2339               break;
2340             } else {
2341               assert(c != c->in(0), "");
2342               c = c->in(0);
2343             }
2344           }
2345         }
2346       }
2347     } while (stack.size() > 0);
2348     assert(mem != nullptr, "should have found safepoint");
2349   } else {
2350     mem = phi_mem;
2351   }
2352   return mem;
2353 }
2354 
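     // Control for a node, except that memory projections of a call are attributed to the
     // matching catch projection (fall-through or catch-all) rather than to the call itself.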
2355 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2356   Node* c = _phase->get_ctrl(n);
2357   if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
2358     assert(c == n->in(0), "");
2359     CallNode* call = c->as_Call();
2360     CallProjections projs;
2361     call->extract_projections(&projs, true, false);
2362     if (projs.catchall_memproj != nullptr) {
2363       if (projs.fallthrough_memproj == n) {
2364         c = projs.fallthrough_catchproj;
2365       } else {
2366         assert(projs.catchall_memproj == n, "");
2367         c = projs.catchall_catchproj;
2368       }
2369     }
2370   }
2371   return c;
2372 }
2373 
2374 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2375   if (_phase->has_ctrl(n))
2376     return get_ctrl(n);
2377   else {
2378     assert (n->is_CFG(), "must be a CFG node");
2379     return n;
2380   }
2381 }
2382 
2383 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2384   return m != nullptr && get_ctrl(m) == c;
2385 }
2386 
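     // Memory state for this alias at ctrl: walk up the dominator tree to the closest recorded
     // memory node; if n is given, also step back over memory nodes at the same control until
     // one that comes before n is found.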
2387 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2388   assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
2389   assert(!ctrl->is_Call() || ctrl == n, "projection expected");
2390 #ifdef ASSERT
2391   if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
2392       (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
2393     CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
2394     int mems = 0;
2395     for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
2396       Node* u = call->fast_out(i);
2397       if (u->bottom_type() == Type::MEMORY) {
2398         mems++;
2399       }
2400     }
2401     assert(mems <= 1, "No node right after call if multiple mem projections");
2402   }
2403 #endif
2404   Node* mem = _memory_nodes[ctrl->_idx];
2405   Node* c = ctrl;
2406   while (!mem_is_valid(mem, c) &&
2407          (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2408     c = _phase->idom(c);
2409     mem = _memory_nodes[c->_idx];
2410   }
2411   if (n != nullptr && mem_is_valid(mem, c)) {
2412     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2413       mem = next_mem(mem, _alias);
2414     }
2415     if (mem->is_MergeMem()) {
2416       mem = mem->as_MergeMem()->memory_at(_alias);
2417     }
2418     if (!mem_is_valid(mem, c)) {
2419       do {
2420         c = _phase->idom(c);
2421         mem = _memory_nodes[c->_idx];
2422       } while (!mem_is_valid(mem, c) &&
2423                (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2424     }
2425   }
2426   assert(mem->bottom_type() == Type::MEMORY, "");
2427   return mem;
2428 }
2429 
2430 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2431   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2432     Node* use = region->fast_out(i);
2433     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2434         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2435       return true;
2436     }
2437   }
2438   return false;
2439 }
2440 
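     // Rewire users of the old memory state 'mem' after new_mem has been introduced at
     // new_ctrl: splice new_mem into the local memory chain or propagate the new state through
     // the CFG (creating memory phis at merges), then update loads, other memory nodes,
     // MergeMems, memory phis and wide-memory users that still see the old state.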
2441 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2442   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2443   const bool trace = false;
2444   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2445   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2446   GrowableArray<Node*> phis;
2447   if (mem_for_ctrl != mem) {
2448     Node* old = mem_for_ctrl;
2449     Node* prev = nullptr;
2450     while (old != mem) {
2451       prev = old;
2452       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2453         assert(_alias == Compile::AliasIdxRaw, "");
2454         old = old->in(MemNode::Memory);
2455       } else if (old->Opcode() == Op_SCMemProj) {
2456         assert(_alias == Compile::AliasIdxRaw, "");
2457         old = old->in(0);
2458       } else {
2459         ShouldNotReachHere();
2460       }
2461     }
2462     assert(prev != nullptr, "");
2463     if (new_ctrl != ctrl) {
2464       _memory_nodes.map(ctrl->_idx, mem);
2465       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2466     }
2467     uint input = (uint)MemNode::Memory;
2468     _phase->igvn().replace_input_of(prev, input, new_mem);
2469   } else {
2470     uses.clear();
2471     _memory_nodes.map(new_ctrl->_idx, new_mem);
2472     uses.push(new_ctrl);
2473     for (uint next = 0; next < uses.size(); next++) {
2474       Node* n = uses.at(next);
2475       assert(n->is_CFG(), "");
2476       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2477       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2478         Node* u = n->fast_out(i);
2479         if (!u->is_Root() && u->is_CFG() && u != n) {
2480           Node* m = _memory_nodes[u->_idx];
2481           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2482               !has_mem_phi(u) &&
2483               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2484             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2485             DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
2486 
2487             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2488               bool push = true;
2489               bool create_phi = true;
2490               if (_phase->is_dominator(new_ctrl, u)) {
2491                 create_phi = false;
2492               }
2493               if (create_phi) {
2494                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2495                 _phase->register_new_node(phi, u);
2496                 phis.push(phi);
2497                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2498                 if (!mem_is_valid(m, u)) {
2499                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2500                   _memory_nodes.map(u->_idx, phi);
2501                 } else {
2502                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2503                   for (;;) {
2504                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2505                     Node* next = nullptr;
2506                     if (m->is_Proj()) {
2507                       next = m->in(0);
2508                     } else {
2509                       assert(m->is_Mem() || m->is_LoadStore(), "");
2510                       assert(_alias == Compile::AliasIdxRaw, "");
2511                       next = m->in(MemNode::Memory);
2512                     }
2513                     if (_phase->get_ctrl(next) != u) {
2514                       break;
2515                     }
2516                     if (next->is_MergeMem()) {
2517                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2518                       break;
2519                     }
2520                     if (next->is_Phi()) {
2521                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2522                       break;
2523                     }
2524                     m = next;
2525                   }
2526 
2527                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2528                   assert(m->is_Mem() || m->is_LoadStore(), "");
2529                   uint input = (uint)MemNode::Memory;
2530                   _phase->igvn().replace_input_of(m, input, phi);
2531                   push = false;
2532                 }
2533               } else {
2534                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2535               }
2536               if (push) {
2537                 uses.push(u);
2538               }
2539             }
2540           } else if (!mem_is_valid(m, u) &&
2541                      !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
2542             uses.push(u);
2543           }
2544         }
2545       }
2546     }
2547     for (int i = 0; i < phis.length(); i++) {
2548       Node* n = phis.at(i);
2549       Node* r = n->in(0);
2550       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2551       for (uint j = 1; j < n->req(); j++) {
2552         Node* m = find_mem(r->in(j), nullptr);
2553         _phase->igvn().replace_input_of(n, j, m);
2554         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2555       }
2556     }
2557   }
2558   uint last = _phase->C->unique();
2559   MergeMemNode* mm = nullptr;
2560   int alias = _alias;
2561   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2562   // Process loads first: if the memory edge of a store were updated
2563   // before a load is processed, an anti-dependency could be missed.
2565   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2566     Node* u = mem->out(i);
2567     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2568       Node* m = find_mem(_phase->get_ctrl(u), u);
2569       if (m != mem) {
2570         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2571         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2572         --i;
2573       }
2574     }
2575   }
2576   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2577     Node* u = mem->out(i);
2578     if (u->_idx < last) {
2579       if (u->is_Mem()) {
2580         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2581           Node* m = find_mem(_phase->get_ctrl(u), u);
2582           if (m != mem) {
2583             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2584             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2585             --i;
2586           }
2587         }
2588       } else if (u->is_MergeMem()) {
2589         MergeMemNode* u_mm = u->as_MergeMem();
2590         if (u_mm->memory_at(alias) == mem) {
2591           MergeMemNode* newmm = nullptr;
2592           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2593             Node* uu = u->fast_out(j);
2594             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2595             if (uu->is_Phi()) {
2596               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2597               Node* region = uu->in(0);
2598               int nb = 0;
2599               for (uint k = 1; k < uu->req(); k++) {
2600                 if (uu->in(k) == u) {
2601                   Node* m = find_mem(region->in(k), nullptr);
2602                   if (m != mem) {
2603                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2604                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2605                     if (newmm != u) {
2606                       _phase->igvn().replace_input_of(uu, k, newmm);
2607                       nb++;
2608                       --jmax;
2609                     }
2610                   }
2611                 }
2612               }
2613               if (nb > 0) {
2614                 --j;
2615               }
2616             } else {
2617               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2618               if (m != mem) {
2619                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2620                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2621                 if (newmm != u) {
2622                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2623                   --j, --jmax;
2624                 }
2625               }
2626             }
2627           }
2628         }
2629       } else if (u->is_Phi()) {
2630         assert(u->bottom_type() == Type::MEMORY, "what else?");
2631         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2632           Node* region = u->in(0);
2633           bool replaced = false;
2634           for (uint j = 1; j < u->req(); j++) {
2635             if (u->in(j) == mem) {
2636               Node* m = find_mem(region->in(j), nullptr);
2637               Node* nnew = m;
2638               if (m != mem) {
2639                 if (u->adr_type() == TypePtr::BOTTOM) {
2640                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2641                   nnew = mm;
2642                 }
2643                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2644                 _phase->igvn().replace_input_of(u, j, nnew);
2645                 replaced = true;
2646               }
2647             }
2648           }
2649           if (replaced) {
2650             --i;
2651           }
2652         }
2653       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2654                  u->adr_type() == nullptr) {
2655         assert(u->adr_type() != nullptr ||
2656                u->Opcode() == Op_Rethrow ||
2657                u->Opcode() == Op_Return ||
2658                u->Opcode() == Op_SafePoint ||
2659                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2660                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2661                u->Opcode() == Op_CallLeaf, "");
2662         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2663         if (m != mem) {
2664           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2665           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2666           --i;
2667         }
2668       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2669         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2670         if (m != mem) {
2671           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2672           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2673           --i;
2674         }
2675       } else if (u->adr_type() != TypePtr::BOTTOM &&
2676                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2677         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2678         assert(m != mem, "");
2679         // u is on the wrong slice...
2680         assert(u->is_ClearArray(), "");
2681         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2682         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2683         --i;
2684       }
2685     }
2686   }
2687 #ifdef ASSERT
2688   assert(new_mem->outcnt() > 0, "");
2689   for (int i = 0; i < phis.length(); i++) {
2690     Node* n = phis.at(i);
2691     assert(n->outcnt() > 0, "new phi must have uses now");
2692   }
2693 #endif
2694 }
2695 
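// Record the control -> memory state mapping after a new control node has
// been introduced: 'ctrl' keeps 'mem' while 'new_ctrl' is associated with
// 'mem_for_ctrl'. If the two memory states (or the two controls) are the
// same, there is nothing to record.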
2696 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2697   if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2698     _memory_nodes.map(ctrl->_idx, mem);
2699     _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2700   }
2701 }
2702 
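// Build a MergeMem equivalent to the memory state 'mem', except that the
// slice this fixer tracks (_alias) is replaced with 'rep_proj'. The new
// node is registered at 'rep_ctrl'.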
2703 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2704   MergeMemNode* mm = MergeMemNode::make(mem);
2705   mm->set_memory_at(_alias, rep_proj);
2706   _phase->register_new_node(mm, rep_ctrl);
2707   return mm;
2708 }
2709 
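// Redirect the _alias slice of the MergeMem 'u' to 'rep_proj'. A MergeMem
// with a single use is updated in place (adjusting the caller's DUIterator
// when an edge to 'mem' is replaced); otherwise a fresh MergeMem is built,
// because editing 'u' would add and remove edges under an active DUIterator.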
2710 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2711   MergeMemNode* newmm = nullptr;
2712   MergeMemNode* u_mm = u->as_MergeMem();
2713   Node* c = _phase->get_ctrl(u);
2714   if (_phase->is_dominator(c, rep_ctrl)) {
2715     c = rep_ctrl;
2716   } else {
2717     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2718   }
2719   if (u->outcnt() == 1) {
2720     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2721       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2722       --i;
2723     } else {
2724       _phase->igvn().rehash_node_delayed(u);
2725       u_mm->set_memory_at(_alias, rep_proj);
2726     }
2727     newmm = u_mm;
2728     _phase->set_ctrl_and_loop(u, c);
2729   } else {
    // We can't simply clone u and then change one of its inputs: that
    // would add and then remove an edge, which messes with the
    // DUIterator.
2733     newmm = MergeMemNode::make(u_mm->base_memory());
2734     for (uint j = 0; j < u->req(); j++) {
2735       if (j < newmm->req()) {
2736         if (j == (uint)_alias) {
2737           newmm->set_req(j, rep_proj);
2738         } else if (newmm->in(j) != u->in(j)) {
2739           newmm->set_req(j, u->in(j));
2740         }
2741       } else if (j == (uint)_alias) {
2742         newmm->add_req(rep_proj);
2743       } else {
2744         newmm->add_req(u->in(j));
2745       }
2746     }
2747     if ((uint)_alias >= u->req()) {
2748       newmm->set_memory_at(_alias, rep_proj);
2749     }
2750     _phase->register_new_node(newmm, c);
2751   }
2752   return newmm;
2753 }
2754 
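// A memory Phi is a candidate for rewiring if it is on this fixer's slice,
// or if it is a bottom (all-slices) memory Phi whose region does not already
// carry a dedicated Phi for that slice.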
2755 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2756   if (phi->adr_type() == TypePtr::BOTTOM) {
2757     Node* region = phi->in(0);
2758     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2759       Node* uu = region->fast_out(j);
2760       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2761         return false;
2762       }
2763     }
2764     return true;
2765   }
2766   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2767 }
2768 
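// Rewire uses of the memory state 'mem' that are dominated by 'rep_ctrl' so
// that they consume 'rep_proj' instead (wrapped in a MergeMem where a use
// expects the full memory state). 'replacement' itself and nodes created
// after this method starts are skipped.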
2769 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase->C->unique();
2771   MergeMemNode* mm = nullptr;
2772   assert(mem->bottom_type() == Type::MEMORY, "");
2773   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2774     Node* u = mem->out(i);
2775     if (u != replacement && u->_idx < last) {
2776       if (u->is_MergeMem()) {
2777         MergeMemNode* u_mm = u->as_MergeMem();
2778         if (u_mm->memory_at(_alias) == mem) {
2779           MergeMemNode* newmm = nullptr;
2780           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2781             Node* uu = u->fast_out(j);
2782             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2783             if (uu->is_Phi()) {
2784               if (should_process_phi(uu)) {
2785                 Node* region = uu->in(0);
2786                 int nb = 0;
2787                 for (uint k = 1; k < uu->req(); k++) {
2788                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2789                     if (newmm == nullptr) {
2790                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2791                     }
2792                     if (newmm != u) {
2793                       _phase->igvn().replace_input_of(uu, k, newmm);
2794                       nb++;
2795                       --jmax;
2796                     }
2797                   }
2798                 }
2799                 if (nb > 0) {
2800                   --j;
2801                 }
2802               }
2803             } else {
2804               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2805                 if (newmm == nullptr) {
2806                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2807                 }
2808                 if (newmm != u) {
2809                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2810                   --j, --jmax;
2811                 }
2812               }
2813             }
2814           }
2815         }
2816       } else if (u->is_Phi()) {
2817         assert(u->bottom_type() == Type::MEMORY, "what else?");
2818         Node* region = u->in(0);
2819         if (should_process_phi(u)) {
2820           bool replaced = false;
2821           for (uint j = 1; j < u->req(); j++) {
2822             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2823               Node* nnew = rep_proj;
2824               if (u->adr_type() == TypePtr::BOTTOM) {
2825                 if (mm == nullptr) {
2826                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2827                 }
2828                 nnew = mm;
2829               }
2830               _phase->igvn().replace_input_of(u, j, nnew);
2831               replaced = true;
2832             }
2833           }
2834           if (replaced) {
2835             --i;
2836           }
        }
2839       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2840                  u->adr_type() == nullptr) {
2841         assert(u->adr_type() != nullptr ||
2842                u->Opcode() == Op_Rethrow ||
2843                u->Opcode() == Op_Return ||
2844                u->Opcode() == Op_SafePoint ||
2845                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2846                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2847                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2848         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2849           if (mm == nullptr) {
2850             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2851           }
2852           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2853           --i;
2854         }
2855       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2856         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2857           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2858           --i;
2859         }
2860       }
2861     }
2862   }
2863 }
2864 
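// The barrier node takes the control and the loaded oop as inputs and
// registers itself with the C2 barrier set state so it can be found for
// later expansion.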
2865 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
2866 : Node(ctrl, obj), _decorators(decorators) {
2867   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2868 }
2869 
2870 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
2871   return _decorators;
2872 }
2873 
2874 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2875   return sizeof(*this);
2876 }
2877 
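// Reduce a decorator set to the bits that matter for barrier semantics:
// the reference strength (strong/weak/phantom/unknown) and IN_NATIVE.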
2878 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2879   return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2880 }
2881 
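// hash() and cmp() take the masked decorators into account so that GVN only
// commons up barriers with equivalent access semantics.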
2882 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2883   uint hash = Node::hash();
2884   hash += mask_decorators(_decorators);
2885   return hash;
2886 }
2887 
bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
2889   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2890          mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2891 }
2892 
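// For strong accesses the barrier returns the loaded value unchanged, so its
// type is the input type. For weaker accesses the barrier may also produce
// null (e.g. for a referent that is no longer strongly reachable), hence the
// meet with NULL_PTR.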
2893 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2894   if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2895     return Type::TOP;
2896   }
2897   const Type* t = in(ValueIn)->bottom_type();
2898   if (t == TypePtr::NULL_PTR) {
2899     return t;
2900   }
2901 
2902   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2903     return t;
2904   }
2905 
2906   return t->meet(TypePtr::NULL_PTR);
2907 }
2908 
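// Same reasoning as bottom_type(), but using the phase's current notion of
// the input type.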
2909 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
  // If the input is TOP, the result is TOP.
  const Type* t2 = phase->type(in(ValueIn));
  if (t2 == Type::TOP) return Type::TOP;
2913 
2914   if (t2 == TypePtr::NULL_PTR) {
2915     return t2;
2916   }
2917 
2918   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2919     return t2;
2920   }
2921 
2922   return t2->meet(TypePtr::NULL_PTR);
2923 }
2924 
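// The barrier folds to its input when the input provably never needs a load
// reference barrier (e.g. null, constants, freshly allocated objects).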
2925 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2926   Node* value = in(ValueIn);
2927   if (!needs_barrier(phase, value)) {
2928     return value;
2929   }
2930   return this;
2931 }
2932 
2933 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2934   Unique_Node_List visited;
2935   return needs_barrier_impl(phase, n, visited);
2936 }
2937 
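// Conservatively walk up the inputs of 'n' (through Phis, casts, projections,
// etc.) to decide whether the value may still need a load reference barrier.
// Unhandled node shapes trip the debug dump below and are treated as needing
// a barrier.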
2938 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2939   if (n == nullptr) return false;
2940   if (visited.member(n)) {
2941     return false; // Been there.
2942   }
2943   visited.push(n);
2944 
2945   if (n->is_Allocate()) {
2946     // tty->print_cr("optimize barrier on alloc");
2947     return false;
2948   }
2949   if (n->is_Call()) {
2950     // tty->print_cr("optimize barrier on call");
2951     return false;
2952   }
2953 
2954   const Type* type = phase->type(n);
2955   if (type == Type::TOP) {
2956     return false;
2957   }
2958   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2959     // tty->print_cr("optimize barrier on null");
2960     return false;
2961   }
2962   if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
2963     // tty->print_cr("optimize barrier on constant");
2964     return false;
2965   }
2966 
2967   switch (n->Opcode()) {
2968     case Op_AddP:
2969       return true; // TODO: Can refine?
2970     case Op_LoadP:
2971     case Op_ShenandoahCompareAndExchangeN:
2972     case Op_ShenandoahCompareAndExchangeP:
2973     case Op_CompareAndExchangeN:
2974     case Op_CompareAndExchangeP:
2975     case Op_GetAndSetN:
2976     case Op_GetAndSetP:
2977       return true;
2978     case Op_Phi: {
2979       for (uint i = 1; i < n->req(); i++) {
2980         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2981       }
2982       return false;
2983     }
2984     case Op_CheckCastPP:
2985     case Op_CastPP:
2986       return needs_barrier_impl(phase, n->in(1), visited);
2987     case Op_Proj:
2988       return needs_barrier_impl(phase, n->in(0), visited);
2989     case Op_ShenandoahLoadReferenceBarrier:
2990       // tty->print_cr("optimize barrier on barrier");
2991       return false;
2992     case Op_Parm:
2993       // tty->print_cr("optimize barrier on input arg");
2994       return false;
2995     case Op_DecodeN:
2996     case Op_EncodeP:
2997       return needs_barrier_impl(phase, n->in(1), visited);
2998     case Op_LoadN:
2999       return true;
3000     case Op_CMoveN:
3001     case Op_CMoveP:
3002       return needs_barrier_impl(phase, n->in(2), visited) ||
3003              needs_barrier_impl(phase, n->in(3), visited);
3004     case Op_ShenandoahIUBarrier:
3005       return needs_barrier_impl(phase, n->in(1), visited);
3006     case Op_CreateEx:
3007       return false;
3008     default:
3009       break;
3010   }
3011 #ifdef ASSERT
3012   tty->print("need barrier on?: ");
3013   tty->print_cr("ins:");
3014   n->dump(2);
3015   tty->print_cr("outs:");
3016   n->dump(-2);
3017   ShouldNotReachHere();
3018 #endif
3019   return true;
3020 }