1 /*
   2  * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  30 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  32 #include "gc/shenandoah/shenandoahForwarding.hpp"
  33 #include "gc/shenandoah/shenandoahHeap.hpp"
  34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  35 #include "gc/shenandoah/shenandoahRuntime.hpp"
  36 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/callnode.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 
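     // Late barrier expansion: if any load-reference barriers remain, run a
     // dedicated loop-opts pass (LoopOptsShenandoahExpand) to expand them,
     // temporarily stepping out of the post-loop-opts phase to do so.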
  47 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  48   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  49   if (state->load_reference_barriers_count() > 0) {
  50     assert(C->post_loop_opts_phase(), "no loop opts allowed");
  51     C->reset_post_loop_opts_phase(); // ... but we know what we are doing
  52     C->clear_major_progress();
  53     PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
  54     if (C->failing()) return false;
  55     C->process_for_post_loop_opts_igvn(igvn);
  56     if (C->failing()) return false;
  57 
  58     C->set_post_loop_opts_phase(); // now for real!
  59   }
  60   return true;
  61 }
  62 
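     // Recognizes the control shape emitted by test_gc_state() below:
     // If (Bool#ne (CmpI (AndI gc_state mask) 0)), where gc_state is the
     // thread-local gc-state byte load matched by is_gc_state_load().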
  63 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  64   if (!UseShenandoahGC) {
  65     return false;
  66   }
  67   assert(iff->is_If(), "bad input");
  68   if (iff->Opcode() != Op_If) {
  69     return false;
  70   }
  71   Node* bol = iff->in(1);
  72   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  73     return false;
  74   }
  75   Node* cmp = bol->in(1);
  76   if (cmp->Opcode() != Op_CmpI) {
  77     return false;
  78   }
  79   Node* in1 = cmp->in(1);
  80   Node* in2 = cmp->in(2);
  81   if (in2->find_int_con(-1) != 0) {
  82     return false;
  83   }
  84   if (in1->Opcode() != Op_AndI) {
  85     return false;
  86   }
  87   in2 = in1->in(2);
  88   if (in2->find_int_con(-1) != mask) {
  89     return false;
  90   }
  91   in1 = in1->in(1);
  92 
  93   return is_gc_state_load(in1);
  94 }
  95 
  96 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  97   return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
  98 }
  99 
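     // Matches a LoadB/LoadUB of the gc-state byte at
     // ShenandoahThreadLocalData::gc_state_offset() off the current thread.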
 100 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 101   if (!UseShenandoahGC) {
 102     return false;
 103   }
 104   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 105     return false;
 106   }
 107   Node* addp = n->in(MemNode::Address);
 108   if (!addp->is_AddP()) {
 109     return false;
 110   }
 111   Node* base = addp->in(AddPNode::Address);
 112   Node* off = addp->in(AddPNode::Offset);
 113   if (base->Opcode() != Op_ThreadLocal) {
 114     return false;
 115   }
 116   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 117     return false;
 118   }
 119   return true;
 120 }
 121 
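     // Walks control from start up to stop (stop must dominate start) and reports
     // whether any path crosses a safepoint that is not a leaf call.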
 122 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 123   assert(phase->is_dominator(stop, start), "bad inputs");
 124   ResourceMark rm;
 125   Unique_Node_List wq;
 126   wq.push(start);
 127   for (uint next = 0; next < wq.size(); next++) {
 128     Node *m = wq.at(next);
 129     if (m == stop) {
 130       continue;
 131     }
 132     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 133       return true;
 134     }
 135     if (m->is_Region()) {
 136       for (uint i = 1; i < m->req(); i++) {
 137         wq.push(m->in(i));
 138       }
 139     } else {
 140       wq.push(m->in(0));
 141     }
 142   }
 143   return false;
 144 }
 145 
 146 #ifdef ASSERT
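     // Debug-only helper for verify(): traces an oop input through casts, AddPs,
     // phis and CMoves, and accepts it only if every source either needs no barrier
     // (constant, parameter, allocation, ...) or is a ShenandoahLoadReferenceBarrier.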
 147 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 148   assert(phis.size() == 0, "");
 149 
 150   while (true) {
 151     if (in->bottom_type() == TypePtr::NULL_PTR) {
 152       if (trace) {tty->print_cr("null");}
 153     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 154       if (trace) {tty->print_cr("Non oop");}
 155     } else {
 156       if (in->is_ConstraintCast()) {
 157         in = in->in(1);
 158         continue;
 159       } else if (in->is_AddP()) {
 160         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 161         in = in->in(AddPNode::Address);
 162         continue;
 163       } else if (in->is_Con()) {
 164         if (trace) {
 165           tty->print("Found constant");
 166           in->dump();
 167         }
 168       } else if (in->Opcode() == Op_Parm) {
 169         if (trace) {
 170           tty->print("Found argument");
 171         }
 172       } else if (in->Opcode() == Op_CreateEx) {
 173         if (trace) {
 174           tty->print("Found create-exception");
 175         }
 176       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 177         if (trace) {
 178           tty->print("Found raw LoadP (OSR argument?)");
 179         }
 180       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 181         if (t == ShenandoahOopStore) {
 182           return false;
 183         }
 184         barriers_used.push(in);
 185         if (trace) {tty->print("Found barrier"); in->dump();}
 186       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 187         if (trace) {
 188           tty->print("Found alloc");
 189           in->in(0)->dump();
 190         }
 191       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 192         if (trace) {
 193           tty->print("Found Java call");
 194         }
 195       } else if (in->is_Phi()) {
 196         if (!visited.test_set(in->_idx)) {
 197           if (trace) {tty->print("Pushed phi:"); in->dump();}
 198           phis.push(in, 2);
 199           in = in->in(1);
 200           continue;
 201         }
 202         if (trace) {tty->print("Already seen phi:"); in->dump();}
 203       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 204         if (!visited.test_set(in->_idx)) {
 205           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 206           phis.push(in, CMoveNode::IfTrue);
 207           in = in->in(CMoveNode::IfFalse);
 208           continue;
 209         }
 210         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 211       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 212         in = in->in(1);
 213         continue;
 214       } else {
 215         return false;
 216       }
 217     }
 218     bool cont = false;
 219     while (phis.is_nonempty()) {
 220       uint idx = phis.index();
 221       Node* phi = phis.node();
 222       if (idx >= phi->req()) {
 223         if (trace) {tty->print("Popped phi:"); phi->dump();}
 224         phis.pop();
 225         continue;
 226       }
 227       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 228       in = phi->in(idx);
 229       phis.set_index(idx+1);
 230       cont = true;
 231       break;
 232     }
 233     if (!cont) {
 234       break;
 235     }
 236   }
 237   return true;
 238 }
 239 
 240 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 241   if (n1 != nullptr) {
 242     n1->dump(+10);
 243   }
 244   if (n2 != nullptr) {
 245     n2->dump(+10);
 246   }
 247   fatal("%s", msg);
 248 }
 249 
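     // Debug-only graph verification: walks the graph from the root and checks
     // that loads, stores, compares, atomics and the runtime/intrinsic calls
     // listed below only see properly barriered oops.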
 250 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 251   ResourceMark rm;
 252   Unique_Node_List wq;
 253   GrowableArray<Node*> barriers;
 254   Unique_Node_List barriers_used;
 255   Node_Stack phis(0);
 256   VectorSet visited;
 257   const bool trace = false;
 258   const bool verify_no_useless_barrier = false;
 259 
 260   wq.push(root);
 261   for (uint next = 0; next < wq.size(); next++) {
 262     Node *n = wq.at(next);
 263     if (n->is_Load()) {
 264       const bool trace = false;
 265       if (trace) {tty->print("Verifying"); n->dump();}
 266       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 267         if (trace) {tty->print_cr("Load range/klass");}
 268       } else {
 269         const TypePtr* adr_type = n->as_Load()->adr_type();
 270 
 271         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 272           if (trace) {tty->print_cr("Mark load");}
 273         } else if (adr_type->isa_instptr() &&
 274                    adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 275                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
 276           if (trace) {tty->print_cr("Reference.get()");}
 277         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 278           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 279         }
 280       }
 281     } else if (n->is_Store()) {
 282       const bool trace = false;
 283 
 284       if (trace) {tty->print("Verifying"); n->dump();}
 285       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 286         Node* adr = n->in(MemNode::Address);
 287         bool verify = true;
 288 
 289         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 290           adr = adr->in(AddPNode::Address);
 291           if (adr->is_AddP()) {
 292             assert(adr->in(AddPNode::Base)->is_top(), "");
 293             adr = adr->in(AddPNode::Address);
 294             if (adr->Opcode() == Op_LoadP &&
 295                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 296                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 297                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 298               if (trace) {tty->print_cr("SATB prebarrier");}
 299               verify = false;
 300             }
 301           }
 302         }
 303 
 304         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
 305           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 306         }
 307       }
 308       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 309         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 310       }
 311     } else if (n->Opcode() == Op_CmpP) {
 312       const bool trace = false;
 313 
 314       Node* in1 = n->in(1);
 315       Node* in2 = n->in(2);
 316       if (in1->bottom_type()->isa_oopptr()) {
 317         if (trace) {tty->print("Verifying"); n->dump();}
 318 
 319         bool mark_inputs = false;
 320         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 321             (in1->is_Con() || in2->is_Con())) {
 322           if (trace) {tty->print_cr("Comparison against a constant");}
 323           mark_inputs = true;
 324         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 325                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 326           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 327           mark_inputs = true;
 328         } else {
 329           assert(in2->bottom_type()->isa_oopptr(), "");
 330 
 331           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 332               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 333             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 334           }
 335         }
 336         if (verify_no_useless_barrier &&
 337             mark_inputs &&
 338             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 339              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 340           phis.clear();
 341           visited.reset();
 342         }
 343       }
 344     } else if (n->is_LoadStore()) {
 345       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 346           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
 347         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 348       }
 349 
 350       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 351         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 352       }
 353     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 354       CallNode* call = n->as_Call();
 355 
 356       static struct {
 357         const char* name;
 358         struct {
 359           int pos;
 360           verify_type t;
 361         } args[6];
 362       } calls[] = {
 363         "array_partition_stub",
 364         { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore },   { -1, ShenandoahNone },
 365           { -1, ShenandoahNone },                { -1, ShenandoahNone },                  { -1, ShenandoahNone } },
 366         "arraysort_stub",
 367         { { TypeFunc::Parms, ShenandoahStore },  { -1, ShenandoahNone },                  { -1, ShenandoahNone },
 368           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 369         "aescrypt_encryptBlock",
 370         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 371           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 372         "aescrypt_decryptBlock",
 373         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 374           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 375         "multiplyToLen",
 376         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 377           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 378         "squareToLen",
 379         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 380           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 381         "montgomery_multiply",
 382         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 383           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 384         "montgomery_square",
 385         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 386           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 387         "mulAdd",
 388         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 389           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 390         "vectorizedMismatch",
 391         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 392           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 393         "updateBytesCRC32",
 394         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 395           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 396         "updateBytesAdler32",
 397         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 398           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 399         "updateBytesCRC32C",
 400         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 401           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 402         "counterMode_AESCrypt",
 403         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 404           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 405         "cipherBlockChaining_encryptAESCrypt",
 406         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 407           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 408         "cipherBlockChaining_decryptAESCrypt",
 409         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 410           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 411         "shenandoah_clone",
 412         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 413           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 414         "ghash_processBlocks",
 415         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 416           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 417         "sha1_implCompress",
 418         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 419           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 420         "sha256_implCompress",
 421         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 422           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 423         "sha512_implCompress",
 424         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 425           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 426         "sha1_implCompressMB",
 427         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 428           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 429         "sha256_implCompressMB",
 430         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 431           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 432         "sha512_implCompressMB",
 433         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 434           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 435         "encodeBlock",
 436         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 437           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 438         "decodeBlock",
 439         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 440           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 441       };
 442 
 443       if (call->is_call_to_arraycopystub()) {
 444         Node* dest = nullptr;
 445         const TypeTuple* args = n->as_Call()->_tf->domain();
 446         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 447           if (args->field_at(i)->isa_ptr()) {
 448             j++;
 449             if (j == 2) {
 450               dest = n->in(i);
 451               break;
 452             }
 453           }
 454         }
 455         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 456             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 457           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 458         }
 459       } else if (strlen(call->_name) > 5 &&
 460                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 461         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 462           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 463         }
 464       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 465         // skip
 466       } else {
 467         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 468         int i = 0;
 469         for (; i < calls_len; i++) {
 470           if (!strcmp(calls[i].name, call->_name)) {
 471             break;
 472           }
 473         }
 474         if (i != calls_len) {
 475           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 476           for (uint j = 0; j < args_len; j++) {
 477             int pos = calls[i].args[j].pos;
 478             if (pos == -1) {
 479               break;
 480             }
 481             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 482               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 483             }
 484           }
 485           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 486             if (call->in(j)->bottom_type()->make_ptr() &&
 487                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 488               uint k = 0;
 489               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 490               if (k == args_len) {
 491                 fatal("arg %d for call %s not covered", j, call->_name);
 492               }
 493             }
 494           }
 495         } else {
 496           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 497             if (call->in(j)->bottom_type()->make_ptr() &&
 498                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 499               fatal("%s not covered", call->_name);
 500             }
 501           }
 502         }
 503       }
 504     } else if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 505       // skip
 506     } else if (n->is_AddP()
 507                || n->is_Phi()
 508                || n->is_ConstraintCast()
 509                || n->Opcode() == Op_Return
 510                || n->Opcode() == Op_CMoveP
 511                || n->Opcode() == Op_CMoveN
 512                || n->Opcode() == Op_Rethrow
 513                || n->is_MemBar()
 514                || n->Opcode() == Op_Conv2B
 515                || n->Opcode() == Op_SafePoint
 516                || n->is_CallJava()
 517                || n->Opcode() == Op_Unlock
 518                || n->Opcode() == Op_EncodeP
 519                || n->Opcode() == Op_DecodeN) {
 520       // nothing to do
 521     } else {
 522       static struct {
 523         int opcode;
 524         struct {
 525           int pos;
 526           verify_type t;
 527         } inputs[2];
 528       } others[] = {
 529         Op_FastLock,
 530         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 531         Op_Lock,
 532         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 533         Op_ArrayCopy,
 534         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 535         Op_StrCompressedCopy,
 536         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 537         Op_StrInflatedCopy,
 538         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 539         Op_AryEq,
 540         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 541         Op_StrIndexOf,
 542         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 543         Op_StrComp,
 544         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 545         Op_StrEquals,
 546         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 547         Op_VectorizedHashCode,
 548         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 549         Op_EncodeISOArray,
 550         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 551         Op_CountPositives,
 552         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 553         Op_CastP2X,
 554         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 555         Op_StrIndexOfChar,
 556         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 557       };
 558 
 559       const int others_len = sizeof(others) / sizeof(others[0]);
 560       int i = 0;
 561       for (; i < others_len; i++) {
 562         if (others[i].opcode == n->Opcode()) {
 563           break;
 564         }
 565       }
 566       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 567       if (i != others_len) {
 568         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 569         for (uint j = 0; j < inputs_len; j++) {
 570           int pos = others[i].inputs[j].pos;
 571           if (pos == -1) {
 572             break;
 573           }
 574           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 575             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 576           }
 577         }
 578         for (uint j = 1; j < stop; j++) {
 579           if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
 580               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 581             uint k = 0;
 582             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 583             if (k == inputs_len) {
 584               fatal("arg %d for node %s not covered", j, n->Name());
 585             }
 586           }
 587         }
 588       } else {
 589         for (uint j = 1; j < stop; j++) {
 590           if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
 591               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 592             fatal("%s not covered", n->Name());
 593           }
 594         }
 595       }
 596     }
 597 
 598     if (n->is_SafePoint()) {
 599       SafePointNode* sfpt = n->as_SafePoint();
 600       if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
 601         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 602           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 603             phis.clear();
 604             visited.reset();
 605           }
 606         }
 607       }
 608     }
         // Keep walking: push the node's inputs so the worklist traversal covers
         // the whole graph instead of stopping at the root.
         for (uint i = 0; i < n->req(); i++) {
           Node* m = n->in(i);
           if (m != nullptr) {
             wq.push(m);
           }
         }
 609   }
 610 
 611   if (verify_no_useless_barrier) {
 612     for (int i = 0; i < barriers.length(); i++) {
 613       Node* n = barriers.at(i);
 614       if (!barriers_used.member(n)) {
 615         tty->print("XXX useless barrier"); n->dump(-2);
 616         ShouldNotReachHere();
 617       }
 618     }
 619   }
 620 }
 621 #endif
 622 
 623 bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
 624                                                                    Node* control) {
 625   return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
 626          phase->ctrl_or_self(maybe_load) == control;
 627 }
 628 
 629 void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
 630   if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
 631     return;
 632   }
 633   Node* mem = maybe_store->in(MemNode::Memory);
 634   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 635     Node* u = mem->fast_out(i);
 636     if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
 637       wq.push(u);
 638     }
 639   }
 640 }
 641 
 642 void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
 643   for (uint i = 0; i < n->req(); i++) {
 644     Node* in = n->in(i);
 645     if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
 646       wq.push(in);
 647     }
 648   }
 649 }
 650 
 651 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 652   // That both nodes have the same control is not sufficient to prove
 653   // domination; verify that there's no path from d to n.
 654   ResourceMark rm;
 655   Unique_Node_List wq;
 656   wq.push(d);
 657   for (uint next = 0; next < wq.size(); next++) {
 658     Node *m = wq.at(next);
 659     if (m == n) {
 660       return false;
 661     }
 662     if (m->is_Phi() && m->in(0)->is_Loop()) {
 663       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 664     } else {
 665       // Take anti-dependencies into account
 666       maybe_push_anti_dependent_loads(phase, m, c, wq);
 667       push_data_inputs_at_control(phase, m, c, wq);
 668     }
 669   }
 670   return true;
 671 }
 672 
 673 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 674   if (d_c != n_c) {
 675     return phase->is_dominator(d_c, n_c);
 676   }
 677   return is_dominator_same_ctrl(d_c, d, n, phase);
 678 }
 679 
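     // Steps one node backwards through the memory graph for the given alias index.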
 680 Node* next_mem(Node* mem, int alias) {
 681   Node* res = nullptr;
 682   if (mem->is_Proj()) {
 683     res = mem->in(0);
 684   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 685     res = mem->in(TypeFunc::Memory);
 686   } else if (mem->is_Phi()) {
 687     res = mem->in(1);
 688   } else if (mem->is_MergeMem()) {
 689     res = mem->as_MergeMem()->memory_at(alias);
 690   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 691     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 692     res = mem->in(MemNode::Memory);
 693   } else {
 694 #ifdef ASSERT
 695     mem->dump();
 696 #endif
 697     ShouldNotReachHere();
 698   }
 699   return res;
 700 }
 701 
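     // Walks the idom chain from c up to dom; returns nullptr if there is no
     // branching in between, the single tolerated If projection when
     // allow_one_proj is set, or NodeSentinel for unsupported control flow
     // (uncommon-trap projections are ignored).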
 702 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 703   Node* iffproj = nullptr;
 704   while (c != dom) {
 705     Node* next = phase->idom(c);
 706     assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 707     if (c->is_Region()) {
 708       ResourceMark rm;
 709       Unique_Node_List wq;
 710       wq.push(c);
 711       for (uint i = 0; i < wq.size(); i++) {
 712         Node *n = wq.at(i);
 713         if (n == next) {
 714           continue;
 715         }
 716         if (n->is_Region()) {
 717           for (uint j = 1; j < n->req(); j++) {
 718             wq.push(n->in(j));
 719           }
 720         } else {
 721           wq.push(n->in(0));
 722         }
 723       }
 724       for (uint i = 0; i < wq.size(); i++) {
 725         Node *n = wq.at(i);
 726         assert(n->is_CFG(), "");
 727         if (n->is_Multi()) {
 728           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 729             Node* u = n->fast_out(j);
 730             if (u->is_CFG()) {
 731               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 732                 return NodeSentinel;
 733               }
 734             }
 735           }
 736         }
 737       }
 738     } else  if (c->is_Proj()) {
 739       if (c->is_IfProj()) {
 740         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
 741           // continue;
 742         } else {
 743           if (!allow_one_proj) {
 744             return NodeSentinel;
 745           }
 746           if (iffproj == nullptr) {
 747             iffproj = c;
 748           } else {
 749             return NodeSentinel;
 750           }
 751         }
 752       } else if (c->Opcode() == Op_JumpProj) {
 753         return NodeSentinel; // unsupported
 754       } else if (c->Opcode() == Op_CatchProj) {
 755         return NodeSentinel; // unsupported
 756       } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
 757         return NodeSentinel; // unsupported
 758       } else {
 759         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 760       }
 761     }
 762     c = next;
 763   }
 764   return iffproj;
 765 }
 766 
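     // Walks the memory graph backwards from mem until the memory state's control
     // strictly dominates ctrl; returns nullptr if a cycle is encountered.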
 767 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 768   ResourceMark rm;
 769   VectorSet wq;
 770   wq.set(mem->_idx);
 771   mem_ctrl = phase->ctrl_or_self(mem);
 772   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 773     mem = next_mem(mem, alias);
 774     if (wq.test_set(mem->_idx)) {
 775       return nullptr;
 776     }
 777     mem_ctrl = phase->ctrl_or_self(mem);
 778   }
 779   if (mem->is_MergeMem()) {
 780     mem = mem->as_MergeMem()->memory_at(alias);
 781     mem_ctrl = phase->ctrl_or_self(mem);
 782   }
 783   return mem;
 784 }
 785 
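     // Finds the bottom (TypePtr::BOTTOM) memory state live at ctrl by walking the
     // dominator tree and inspecting memory phis and memory projections.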
 786 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 787   Node* mem = nullptr;
 788   Node* c = ctrl;
 789   do {
 790     if (c->is_Region()) {
 791       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
 792         Node* u = c->fast_out(i);
 793         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 794           if (u->adr_type() == TypePtr::BOTTOM) {
 795             mem = u;
 796           }
 797         }
 798       }
 799     } else {
 800       if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
 801         CallProjections projs;
 802         c->as_Call()->extract_projections(&projs, true, false);
 803         if (projs.fallthrough_memproj != nullptr) {
 804           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 805             if (projs.catchall_memproj == nullptr) {
 806               mem = projs.fallthrough_memproj;
 807             } else {
 808               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 809                 mem = projs.fallthrough_memproj;
 810               } else {
 811                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 812                 mem = projs.catchall_memproj;
 813               }
 814             }
 815           }
 816         } else {
 817           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 818           if (proj != nullptr &&
 819               proj->adr_type() == TypePtr::BOTTOM) {
 820             mem = proj;
 821           }
 822         }
 823       } else {
 824         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 825           Node* u = c->fast_out(i);
 826           if (u->is_Proj() &&
 827               u->bottom_type() == Type::MEMORY &&
 828               u->adr_type() == TypePtr::BOTTOM) {
 829               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 830               assert(mem == nullptr, "only one proj");
 831               mem = u;
 832           }
 833         }
 834         assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
 835       }
 836     }
 837     c = phase->idom(c);
 838   } while (mem == nullptr);
 839   return mem;
 840 }
 841 
 842 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 843   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 844     Node* u = n->fast_out(i);
 845     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 846       uses.push(u);
 847     }
 848   }
 849 }
 850 
 851 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 852   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 853   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 854   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 855   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 856   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 857   phase->lazy_replace(outer, new_outer);
 858   phase->lazy_replace(le, new_le);
 859   inner->clear_strip_mined();
 860 }
 861 
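     // Emits the gc-state check: load the thread-local gc-state byte, test it
     // against 'flags', and split control into ctrl (some flag set, unlikely path)
     // and test_fail_ctrl (all clear).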
 862 void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
 863                                                PhaseIdealLoop* phase, int flags) {
 864   PhaseIterGVN& igvn = phase->igvn();
 865   Node* old_ctrl = ctrl;
 866 
 867   Node* thread          = new ThreadLocalNode();
 868   Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 869   Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
 870   Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
 871                                         DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
 872                                         TypeInt::BYTE, MemNode::unordered);
 873   Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
 874   Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
 875   Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);
 876 
 877   IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 878   ctrl                  = new IfTrueNode(gc_state_iff);
 879   test_fail_ctrl        = new IfFalseNode(gc_state_iff);
 880 
 881   IdealLoopTree* loop = phase->get_loop(old_ctrl);
 882   phase->register_control(gc_state_iff,   loop, old_ctrl);
 883   phase->register_control(ctrl,           loop, gc_state_iff);
 884   phase->register_control(test_fail_ctrl, loop, gc_state_iff);
 885 
 886   phase->register_new_node(thread,        old_ctrl);
 887   phase->register_new_node(gc_state_addr, old_ctrl);
 888   phase->register_new_node(gc_state,      old_ctrl);
 889   phase->register_new_node(gc_state_and,  old_ctrl);
 890   phase->register_new_node(gc_state_cmp,  old_ctrl);
 891   phase->register_new_node(gc_state_bool, old_ctrl);
 892 
 893   phase->set_ctrl(gc_state_offset, phase->C->root());
 894 
 895   assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
 896 }
 897 
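     // If val may be null, emits an explicit null check: ctrl becomes the not-null
     // path and null_ctrl the null path; otherwise control is left untouched.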
 898 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 899   Node* old_ctrl = ctrl;
 900   PhaseIterGVN& igvn = phase->igvn();
 901 
 902   const Type* val_t = igvn.type(val);
 903   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 904     Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
 905     Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);
 906 
 907     IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 908     ctrl             = new IfTrueNode(null_iff);
 909     null_ctrl        = new IfFalseNode(null_iff);
 910 
 911     IdealLoopTree* loop = phase->get_loop(old_ctrl);
 912     phase->register_control(null_iff,  loop, old_ctrl);
 913     phase->register_control(ctrl,      loop, null_iff);
 914     phase->register_control(null_ctrl, loop, null_iff);
 915 
 916     phase->register_new_node(null_cmp,  old_ctrl);
 917     phase->register_new_node(null_test, old_ctrl);
 918   }
 919 }
 920 
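     // Emits the collection-set check: index the in_cset_fast_test table with the
     // region index of val (raw pointer arithmetic, see JDK-8245465) and split
     // control into ctrl (in cset) and not_cset_ctrl.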
 921 void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 922   Node* old_ctrl = ctrl;
 923   PhaseIterGVN& igvn = phase->igvn();
 924 
 925   Node* raw_val        = new CastP2XNode(old_ctrl, val);
 926   Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 927 
 928   // Figure out the target cset address with raw pointer math.
 929   // This avoids matching AddP+LoadB that would emit inefficient code.
 930   // See JDK-8245465.
 931   Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 932   Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
 933   Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
 934   Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);
 935 
 936   Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
 937                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
 938                                        TypeInt::BYTE, MemNode::unordered);
 939   Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
 940   Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);
 941 
 942   IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 943   ctrl                 = new IfTrueNode(cset_iff);
 944   not_cset_ctrl        = new IfFalseNode(cset_iff);
 945 
 946   IdealLoopTree *loop = phase->get_loop(old_ctrl);
 947   phase->register_control(cset_iff,      loop, old_ctrl);
 948   phase->register_control(ctrl,          loop, cset_iff);
 949   phase->register_control(not_cset_ctrl, loop, cset_iff);
 950 
 951   phase->set_ctrl(cset_addr_ptr, phase->C->root());
 952 
 953   phase->register_new_node(raw_val,        old_ctrl);
 954   phase->register_new_node(cset_idx,       old_ctrl);
 955   phase->register_new_node(cset_addr,      old_ctrl);
 956   phase->register_new_node(cset_load_addr, old_ctrl);
 957   phase->register_new_node(cset_load_ptr,  old_ctrl);
 958   phase->register_new_node(cset_load,      old_ctrl);
 959   phase->register_new_node(cset_cmp,       old_ctrl);
 960   phase->register_new_node(cset_bool,      old_ctrl);
 961 }
 962 
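     // Emits the leaf call to the load-reference-barrier runtime stub selected by
     // the access decorators (strong/weak/phantom, narrow or not) and replaces val
     // with the call result, cast back to the original oop type.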
 963 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
 964                                                DecoratorSet decorators, PhaseIdealLoop* phase) {
 965   IdealLoopTree*loop = phase->get_loop(ctrl);
 966   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
 967 
 968   address calladdr = nullptr;
 969   const char* name = nullptr;
 970   bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
 971   bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
 972   bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
 973   bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
 974   bool is_narrow  = UseCompressedOops && !is_native;
 975   if (is_strong) {
 976     if (is_narrow) {
 977       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
 978       name = "load_reference_barrier_strong_narrow";
 979     } else {
 980       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
 981       name = "load_reference_barrier_strong";
 982     }
 983   } else if (is_weak) {
 984     if (is_narrow) {
 985       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
 986       name = "load_reference_barrier_weak_narrow";
 987     } else {
 988       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
 989       name = "load_reference_barrier_weak";
 990     }
 991   } else {
 992     assert(is_phantom, "only remaining strength");
 993     if (is_narrow) {
 994       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
 995       name = "load_reference_barrier_phantom_narrow";
 996     } else {
 997       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
 998       name = "load_reference_barrier_phantom";
 999     }
1000   }
1001   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1002 
1003   call->init_req(TypeFunc::Control, ctrl);
1004   call->init_req(TypeFunc::I_O, phase->C->top());
1005   call->init_req(TypeFunc::Memory, phase->C->top());
1006   call->init_req(TypeFunc::FramePtr, phase->C->top());
1007   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1008   call->init_req(TypeFunc::Parms, val);
1009   call->init_req(TypeFunc::Parms+1, load_addr);
1010   phase->register_control(call, loop, ctrl);
1011   ctrl = new ProjNode(call, TypeFunc::Control);
1012   phase->register_control(ctrl, loop, call);
1013   val = new ProjNode(call, TypeFunc::Parms);
1014   phase->register_new_node(val, call);
1015   val = new CheckCastPPNode(ctrl, val, obj_type);
1016   phase->register_new_node(val, ctrl);
1017 }
1018 
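     // Collects the nodes at this control that must remain above the expanded
     // barrier: the incoming raw memory state plus, transitively, its data inputs
     // and anti-dependent loads at the same control.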
1019 void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
1020   nodes_above_barrier.clear();
1021   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1022     nodes_above_barrier.push(init_raw_mem);
1023   }
1024   for (uint next = 0; next < nodes_above_barrier.size(); next++) {
1025     Node* n = nodes_above_barrier.at(next);
1026     // Take anti-dependencies into account
1027     maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
1028     push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
1029   }
1030 }
1031 
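     // After expansion, rewires nodes that were control dependent on the barrier's
     // original control so they hang off the new region instead, except for the
     // nodes collected above as having to stay above the barrier.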
1032 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
1033   Node* ctrl = phase->get_ctrl(barrier);
1034   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1035 
1036   // Update the control of all nodes that should be after the
1037   // barrier control flow
1038   uses.clear();
1039   // Every node that is control dependent on the barrier's input
1040   // control will be after the expanded barrier. The raw memory (if
1041   // it is control dependent on the barrier's input control)
1042   // must stay above the barrier.
1043   collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
1044   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1045     Node* u = ctrl->fast_out(i);
1046     if (u->_idx < last &&
1047         u != barrier &&
1048         !u->depends_only_on_test() && // preserve dependency on test
1049         !nodes_above_barrier.member(u) &&
1050         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1051         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1052       Node* old_c = phase->ctrl_or_self(u);
1053       if (old_c != ctrl ||
1054           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1055           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1056         phase->igvn().rehash_node_delayed(u);
1057         int nb = u->replace_edge(ctrl, region, &phase->igvn());
1058         if (u->is_CFG()) {
1059           if (phase->idom(u) == ctrl) {
1060             phase->set_idom(u, region, phase->dom_depth(region));
1061           }
1062         } else if (phase->get_ctrl(u) == ctrl) {
1063           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1064           uses.push(u);
1065         }
1066         assert(nb == 1, "more than 1 ctrl input?");
1067         --i, imax -= nb;
1068       }
1069     }
1070   }
1071 }
1072 
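     // Builds phis at the regions between the call and ctrl that merge n (reached
     // through the fallthrough projection) with n_clone (reached through the
     // exception projection), so uses below the merge pick the right copy.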
1073 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1074   Node* region = nullptr;
1075   while (c != ctrl) {
1076     if (c->is_Region()) {
1077       region = c;
1078     }
1079     c = phase->idom(c);
1080   }
1081   assert(region != nullptr, "");
1082   Node* phi = new PhiNode(region, n->bottom_type());
1083   for (uint j = 1; j < region->req(); j++) {
1084     Node* in = region->in(j);
1085     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1086       phi->init_req(j, n);
1087     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1088       phi->init_req(j, n_clone);
1089     } else {
1090       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1091     }
1092   }
1093   phase->register_new_node(phi, region);
1094   return phi;
1095 }
1096 
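     // Expansion driver: first repositions each load-reference barrier (null-check
     // uncommon traps, rethrow calls, and calls with exception edges need special
     // handling) so that it sits at a control point where it can be expanded.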
1097 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1098   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1099 
1100   Unique_Node_List uses;
1101   Node_Stack stack(0);
1102   Node_List clones;
1103   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1104     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1105 
1106     Node* ctrl = phase->get_ctrl(lrb);
1107     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1108 
1109     CallStaticJavaNode* unc = nullptr;
1110     Node* unc_ctrl = nullptr;
1111     Node* uncasted_val = val;
1112 
1113     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1114       Node* u = lrb->fast_out(i);
1115       if (u->Opcode() == Op_CastPP &&
1116           u->in(0) != nullptr &&
1117           phase->is_dominator(u->in(0), ctrl)) {
1118         const Type* u_t = phase->igvn().type(u);
1119 
1120         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1121             u->in(0)->Opcode() == Op_IfTrue &&
1122             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1123             u->in(0)->in(0)->is_If() &&
1124             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1125             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1126             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1127             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1128             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1129           IdealLoopTree* loop = phase->get_loop(ctrl);
1130           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1131 
1132           if (!unc_loop->is_member(loop)) {
1133             continue;
1134           }
1135 
1136           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1137           assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1138           if (branch == NodeSentinel) {
1139             continue;
1140           }
1141 
1142           Node* iff = u->in(0)->in(0);
1143           Node* bol = iff->in(1)->clone();
1144           Node* cmp = bol->in(1)->clone();
1145           cmp->set_req(1, lrb);
1146           bol->set_req(1, cmp);
1147           phase->igvn().replace_input_of(iff, 1, bol);
1148           phase->set_ctrl(lrb, iff->in(0));
1149           phase->register_new_node(cmp, iff->in(0));
1150           phase->register_new_node(bol, iff->in(0));
1151           break;
1152         }
1153       }
1154     }
1155     // Load barrier on the control output of a call
1156     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1157       CallJavaNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1158       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1159         // The rethrow call may have too many projections to be
1160         // properly handled here. Given there's no reason for a
1161         // barrier to depend on the call, move it above the call
1162         stack.push(lrb, 0);
1163         do {
1164           Node* n = stack.node();
1165           uint idx = stack.index();
1166           if (idx < n->req()) {
1167             Node* in = n->in(idx);
1168             stack.set_index(idx+1);
1169             if (in != nullptr) {
1170               if (phase->has_ctrl(in)) {
1171                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1172 #ifdef ASSERT
1173                   for (uint i = 0; i < stack.size(); i++) {
1174                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1175                   }
1176 #endif
1177                   stack.push(in, 0);
1178                 }
1179               } else {
1180                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1181               }
1182             }
1183           } else {
1184             phase->set_ctrl(n, call->in(0));
1185             stack.pop();
1186           }
1187         } while(stack.size() > 0);
1188         continue;
1189       }
1190       CallProjections projs;
1191       call->extract_projections(&projs, false, false);
1192 
1193       // If this is a runtime call, it doesn't have an exception handling path
1194       if (projs.fallthrough_catchproj == nullptr) {
1195         assert(call->method() == nullptr, "should be runtime call");
1196         assert(projs.catchall_catchproj == nullptr, "runtime call should not have catch all projection");
1197         continue;
1198       }
1199 
1200       // Otherwise, clone the barrier so there's one for the fallthrough and one for the exception handling path
1201 #ifdef ASSERT
1202       VectorSet cloned;
1203 #endif
1204       Node* lrb_clone = lrb->clone();
1205       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1206       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1207 
1208       stack.push(lrb, 0);
1209       clones.push(lrb_clone);
1210 
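           // Walk the data uses of the barrier in lockstep with their clones: uses that sit
           // between the call and the fallthrough catch projection are cloned as well, uses on
           // the exception path are redirected to the clones, and uses reached from both paths
           // get phis merging the original and the clone (create_phis_on_call_return()).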
1211       do {
1212         assert(stack.size() == clones.size(), "");
1213         Node* n = stack.node();
1214 #ifdef ASSERT
1215         if (n->is_Load()) {
1216           Node* mem = n->in(MemNode::Memory);
1217           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1218             Node* u = mem->fast_out(j);
1219             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1220           }
1221         }
1222 #endif
1223         uint idx = stack.index();
1224         Node* n_clone = clones.at(clones.size()-1);
1225         if (idx < n->outcnt()) {
1226           Node* u = n->raw_out(idx);
1227           Node* c = phase->ctrl_or_self(u);
1228           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1229             stack.set_index(idx+1);
1230             assert(!u->is_CFG(), "");
1231             stack.push(u, 0);
1232             assert(!cloned.test_set(u->_idx), "only one clone");
1233             Node* u_clone = u->clone();
1234             int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1235             assert(nb > 0, "should have replaced some uses");
1236             phase->register_new_node(u_clone, projs.catchall_catchproj);
1237             clones.push(u_clone);
1238             phase->set_ctrl(u, projs.fallthrough_catchproj);
1239           } else {
1240             bool replaced = false;
1241             if (u->is_Phi()) {
1242               for (uint k = 1; k < u->req(); k++) {
1243                 if (u->in(k) == n) {
1244                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1245                     phase->igvn().replace_input_of(u, k, n_clone);
1246                     replaced = true;
1247                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1248                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1249                     replaced = true;
1250                   }
1251                 }
1252               }
1253             } else {
1254               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1255                 phase->igvn().rehash_node_delayed(u);
1256                 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1257                 assert(nb > 0, "should have replaced some uses");
1258                 replaced = true;
1259               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1260                 if (u->is_If()) {
1261                   // Can't break If/Bool/Cmp chain
1262                   assert(n->is_Bool(), "unexpected If shape");
1263                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1264                   assert(n_clone->is_Bool(), "unexpected clone");
1265                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1266                   Node* bol_clone = n->clone();
1267                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1268                   bol_clone->set_req(1, cmp_clone);
1269 
1270                   Node* nn = stack.node_at(stack.size()-3);
1271                   Node* nn_clone = clones.at(clones.size()-3);
1272                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1273 
1274                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1275                                                    &phase->igvn());
1276                   assert(nb > 0, "should have replaced some uses");
1277 
1278                   phase->register_new_node(bol_clone, u->in(0));
1279                   phase->register_new_node(cmp_clone, u->in(0));
1280 
1281                   phase->igvn().replace_input_of(u, 1, bol_clone);
1282 
1283                 } else {
1284                   phase->igvn().rehash_node_delayed(u);
1285                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1286                   assert(nb > 0, "should have replaced some uses");
1287                 }
1288                 replaced = true;
1289               }
1290             }
1291             if (!replaced) {
1292               stack.set_index(idx+1);
1293             }
1294           }
1295         } else {
1296           stack.pop();
1297           clones.pop();
1298         }
1299       } while (stack.size() > 0);
1300       assert(stack.size() == 0 && clones.size() == 0, "");
1301     }
1302   }
1303 
1304   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1305     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1306     Node* ctrl = phase->get_ctrl(lrb);
1307     IdealLoopTree* loop = phase->get_loop(ctrl);
1308     Node* head = loop->head();
1309     if (head->is_OuterStripMinedLoop()) {
1310       // Expanding a barrier here will break loop strip mining
1311       // verification. Transform the loop so the loop nest doesn't
1312       // appear as strip mined.
1313       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1314       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1315     }
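         // If the barrier is pinned on the exit projection of a counted loop, expanding it
         // there would add control flow that no longer matches the counted-loop shape, so
         // replace the head with a plain LoopNode first.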
1316     if (head->is_BaseCountedLoop() && ctrl->is_IfProj() && ctrl->in(0)->is_BaseCountedLoopEnd() &&
1317         head->as_BaseCountedLoop()->loopexit() == ctrl->in(0)) {
1318       Node* entry = head->in(LoopNode::EntryControl);
1319       Node* backedge = head->in(LoopNode::LoopBackControl);
1320       Node* new_head = new LoopNode(entry, backedge);
1321       phase->register_control(new_head, phase->get_loop(entry), entry);
1322       phase->lazy_replace(head, new_head);
1323     }
1324   }
1325 
1326   // Expand load-reference-barriers
1327   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1328   Unique_Node_List nodes_above_barriers;
1329   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1330     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1331     uint last = phase->C->unique();
1332     Node* ctrl = phase->get_ctrl(lrb);
1333     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1334 
1335     Node* orig_ctrl = ctrl;
1336 
1337     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1338     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1339 
1340     IdealLoopTree *loop = phase->get_loop(ctrl);
1341 
1342     Node* heap_stable_ctrl = nullptr;
1343     Node* null_ctrl = nullptr;
1344 
1345     assert(val->bottom_type()->make_oopptr(), "need oop");
1346     assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1347 
1348     enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1349     Node* region = new RegionNode(PATH_LIMIT);
1350     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1351 
1352     // Stable path.
1353     int flags = ShenandoahHeap::HAS_FORWARDED;
1354     if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1355       flags |= ShenandoahHeap::WEAK_ROOTS;
1356     }
1357     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1358     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1359 
1360     // Heap stable case
1361     region->init_req(_heap_stable, heap_stable_ctrl);
1362     val_phi->init_req(_heap_stable, val);
1363 
1364     // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
1365     // even for non-cset objects to prevent resurrection of such objects.
1366     // Wires !in_cset(obj) into the _not_cset slot of the region and value phi.
1367     Node* not_cset_ctrl = nullptr;
1368     if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1369       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1370     }
1371     if (not_cset_ctrl != nullptr) {
1372       region->init_req(_not_cset, not_cset_ctrl);
1373       val_phi->init_req(_not_cset, val);
1374     } else {
1375       region->del_req(_not_cset);
1376       val_phi->del_req(_not_cset);
1377     }
1378 
1379     // Resolve the object when the original value is in the collection set:
1380     // the stub performs the unconditional forwarding-pointer resolve (evacuating if needed).
1381 
1382     // Call the LRB stub and wire that path into the _evac_path slot.
1383     Node* result_mem = nullptr;
1384 
1385     Node* addr;
1386     {
1387       VectorSet visited;
1388       addr = get_load_addr(phase, visited, lrb);
1389     }
1390     if (addr->Opcode() == Op_AddP) {
1391       Node* orig_base = addr->in(AddPNode::Base);
1392       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1393       phase->register_new_node(base, ctrl);
1394       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1395         // Field access
1396         addr = addr->clone();
1397         addr->set_req(AddPNode::Base, base);
1398         addr->set_req(AddPNode::Address, base);
1399         phase->register_new_node(addr, ctrl);
1400       } else {
1401         Node* addr2 = addr->in(AddPNode::Address);
1402         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1403               addr2->in(AddPNode::Base) == orig_base) {
1404           addr2 = addr2->clone();
1405           addr2->set_req(AddPNode::Base, base);
1406           addr2->set_req(AddPNode::Address, base);
1407           phase->register_new_node(addr2, ctrl);
1408           addr = addr->clone();
1409           addr->set_req(AddPNode::Base, base);
1410           addr->set_req(AddPNode::Address, addr2);
1411           phase->register_new_node(addr, ctrl);
1412         }
1413       }
1414     }
1415     call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1416     region->init_req(_evac_path, ctrl);
1417     val_phi->init_req(_evac_path, val);
1418 
1419     phase->register_control(region, loop, heap_stable_iff);
1420     Node* out_val = val_phi;
1421     phase->register_new_node(val_phi, region);
1422 
1423     fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
1424 
1425     ctrl = orig_ctrl;
1426 
1427     phase->igvn().replace_node(lrb, out_val);
1428 
1429     follow_barrier_uses(out_val, ctrl, uses, phase);
1430 
1431     for(uint next = 0; next < uses.size(); next++ ) {
1432       Node *n = uses.at(next);
1433       assert(phase->get_ctrl(n) == ctrl, "bad control");
1434       assert(n != raw_mem, "should leave input raw mem above the barrier");
1435       phase->set_ctrl(n, region);
1436       follow_barrier_uses(n, ctrl, uses, phase);
1437     }
1438     fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1439   }
1440   // Done expanding load-reference-barriers.
1441   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1442 }
1443 
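     // Walk back from a barrier input to the load that produced it and return the address
     // that load read from. Returns zerocon(T_OBJECT) when no single address can be
     // determined (constants, call results, atomics, ambiguous phis or cmoves).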
1444 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1445   if (visited.test_set(in->_idx)) {
1446     return nullptr;
1447   }
1448   switch (in->Opcode()) {
1449     case Op_Proj:
1450       return get_load_addr(phase, visited, in->in(0));
1451     case Op_CastPP:
1452     case Op_CheckCastPP:
1453     case Op_DecodeN:
1454     case Op_EncodeP:
1455       return get_load_addr(phase, visited, in->in(1));
1456     case Op_LoadN:
1457     case Op_LoadP:
1458       return in->in(MemNode::Address);
1459     case Op_CompareAndExchangeN:
1460     case Op_CompareAndExchangeP:
1461     case Op_GetAndSetN:
1462     case Op_GetAndSetP:
1463     case Op_ShenandoahCompareAndExchangeP:
1464     case Op_ShenandoahCompareAndExchangeN:
1465       // These instructions would just have stored a different value into
1466       // the field; there is no point in trying to recover a load address here.
1467       return phase->igvn().zerocon(T_OBJECT);
1468     case Op_CMoveP:
1469     case Op_CMoveN: {
1470       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1471       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1472       // Handle unambiguous cases: single address reported on both branches.
1473       if (t != nullptr && f == nullptr) return t;
1474       if (t == nullptr && f != nullptr) return f;
1475       if (t != nullptr && t == f)    return t;
1476       // Ambiguity.
1477       return phase->igvn().zerocon(T_OBJECT);
1478     }
1479     case Op_Phi: {
1480       Node* addr = nullptr;
1481       for (uint i = 1; i < in->req(); i++) {
1482         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1483         if (addr == nullptr) {
1484           addr = addr1;
1485         }
1486         if (addr != addr1) {
1487           return phase->igvn().zerocon(T_OBJECT);
1488         }
1489       }
1490       return addr;
1491     }
1492     case Op_ShenandoahLoadReferenceBarrier:
1493       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1494     case Op_CallDynamicJava:
1495     case Op_CallLeaf:
1496     case Op_CallStaticJava:
1497     case Op_ConN:
1498     case Op_ConP:
1499     case Op_Parm:
1500     case Op_CreateEx:
1501       return phase->igvn().zerocon(T_OBJECT);
1502     default:
1503 #ifdef ASSERT
1504       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1505 #endif
1506       return phase->igvn().zerocon(T_OBJECT);
1507   }
1508 
1509 }
1510 
1511 #ifdef ASSERT
1512 static bool has_never_branch(Node* root) {
1513   for (uint i = 1; i < root->req(); i++) {
1514     Node* in = root->in(i);
1515     if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
1516       return true;
1517     }
1518   }
1519   return false;
1520 }
1521 #endif
1522 
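     // Build a map from CFG node to the memory state for this alias: first walk the memory
     // graph backwards from the root to record memory-producing nodes at their controls,
     // then propagate those states over the CFG in RPO, creating memory phis at regions
     // whose predecessors disagree.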
1523 void MemoryGraphFixer::collect_memory_nodes() {
1524   Node_Stack stack(0);
1525   VectorSet visited;
1526   Node_List regions;
1527 
1528   // Walk the raw memory graph and create a mapping from CFG node to
1529   // memory node. Exclude phis for now.
1530   stack.push(_phase->C->root(), 1);
1531   do {
1532     Node* n = stack.node();
1533     int opc = n->Opcode();
1534     uint i = stack.index();
1535     if (i < n->req()) {
1536       Node* mem = nullptr;
1537       if (opc == Op_Root) {
1538         Node* in = n->in(i);
1539         int in_opc = in->Opcode();
1540         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
1541           mem = in->in(TypeFunc::Memory);
1542         } else if (in_opc == Op_Halt) {
1543           if (in->in(0)->is_Region()) {
1544             Node* r = in->in(0);
1545             for (uint j = 1; j < r->req(); j++) {
1546               assert(!r->in(j)->is_NeverBranch(), "");
1547             }
1548           } else {
1549             Node* proj = in->in(0);
1550             assert(proj->is_Proj(), "");
1551             Node* in = proj->in(0);
1552             assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
1553             if (in->is_CallStaticJava()) {
1554               mem = in->in(TypeFunc::Memory);
1555             } else if (in->Opcode() == Op_Catch) {
1556               Node* call = in->in(0)->in(0);
1557               assert(call->is_Call(), "");
1558               mem = call->in(TypeFunc::Memory);
1559             } else if (in->is_NeverBranch()) {
1560               mem = collect_memory_for_infinite_loop(in);
1561             }
1562           }
1563         } else {
1564 #ifdef ASSERT
1565           n->dump();
1566           in->dump();
1567 #endif
1568           ShouldNotReachHere();
1569         }
1570       } else {
1571         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
1572         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
1573         mem = n->in(i);
1574       }
1575       i++;
1576       stack.set_index(i);
1577       if (mem == nullptr) {
1578         continue;
1579       }
1580       for (;;) {
1581         if (visited.test_set(mem->_idx) || mem->is_Start()) {
1582           break;
1583         }
1584         if (mem->is_Phi()) {
1585           stack.push(mem, 2);
1586           mem = mem->in(1);
1587         } else if (mem->is_Proj()) {
1588           stack.push(mem, mem->req());
1589           mem = mem->in(0);
1590         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
1591           mem = mem->in(TypeFunc::Memory);
1592         } else if (mem->is_MergeMem()) {
1593           MergeMemNode* mm = mem->as_MergeMem();
1594           mem = mm->memory_at(_alias);
1595         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1596           assert(_alias == Compile::AliasIdxRaw, "");
1597           stack.push(mem, mem->req());
1598           mem = mem->in(MemNode::Memory);
1599         } else {
1600 #ifdef ASSERT
1601           mem->dump();
1602 #endif
1603           ShouldNotReachHere();
1604         }
1605       }
1606     } else {
1607       if (n->is_Phi()) {
1608         // Nothing
1609       } else if (!n->is_Root()) {
1610         Node* c = get_ctrl(n);
1611         _memory_nodes.map(c->_idx, n);
1612       }
1613       stack.pop();
1614     }
1615   } while(stack.is_nonempty());
1616 
1617   // Iterate over CFG nodes in rpo and propagate memory state to
1618   // compute memory state at regions, creating new phis if needed.
1619   Node_List rpo_list;
1620   visited.clear();
1621   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
1622   Node* root = rpo_list.pop();
1623   assert(root == _phase->C->root(), "");
1624 
1625   const bool trace = false;
1626 #ifdef ASSERT
1627   if (trace) {
1628     for (int i = rpo_list.size() - 1; i >= 0; i--) {
1629       Node* c = rpo_list.at(i);
1630       if (_memory_nodes[c->_idx] != nullptr) {
1631         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
1632       }
1633     }
1634   }
1635 #endif
1636   uint last = _phase->C->unique();
1637 
1638 #ifdef ASSERT
1639   uint16_t max_depth = 0;
1640   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
1641     IdealLoopTree* lpt = iter.current();
1642     max_depth = MAX2(max_depth, lpt->_nest);
1643   }
1644 #endif
1645 
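       // Iterate to a fixed point: each pass recomputes the state at every region from its
       // predecessors; loop backedges may only see their final state on a later pass.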
1646   bool progress = true;
1647   int iteration = 0;
1648   Node_List dead_phis;
1649   while (progress) {
1650     progress = false;
1651     iteration++;
1652     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1653     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
1654 
1655     for (int i = rpo_list.size() - 1; i >= 0; i--) {
1656       Node* c = rpo_list.at(i);
1657 
1658       Node* prev_mem = _memory_nodes[c->_idx];
1659       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1660         Node* prev_region = regions[c->_idx];
1661         Node* unique = nullptr;
1662         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
1663           Node* m = _memory_nodes[c->in(j)->_idx];
1664           assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
1665           if (m != nullptr) {
1666             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
1667               assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1668               // continue
1669             } else if (unique == nullptr) {
1670               unique = m;
1671             } else if (m == unique) {
1672               // continue
1673             } else {
1674               unique = NodeSentinel;
1675             }
1676           }
1677         }
1678         assert(unique != nullptr, "empty phi???");
1679         if (unique != NodeSentinel) {
1680           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
1681             dead_phis.push(prev_region);
1682           }
1683           regions.map(c->_idx, unique);
1684         } else {
1685           Node* phi = nullptr;
1686           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
1687             phi = prev_region;
1688             for (uint k = 1; k < c->req(); k++) {
1689               Node* m = _memory_nodes[c->in(k)->_idx];
1690               assert(m != nullptr, "expect memory state");
1691               phi->set_req(k, m);
1692             }
1693           } else {
1694             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
1695               Node* u = c->fast_out(j);
1696               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1697                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
1698                 phi = u;
1699                 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
1700                   Node* m = _memory_nodes[c->in(k)->_idx];
1701                   assert(m != nullptr, "expect memory state");
1702                   if (u->in(k) != m) {
1703                     phi = NodeSentinel;
1704                   }
1705                 }
1706               }
1707             }
1708             if (phi == NodeSentinel) {
1709               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
1710               for (uint k = 1; k < c->req(); k++) {
1711                 Node* m = _memory_nodes[c->in(k)->_idx];
1712                 assert(m != nullptr, "expect memory state");
1713                 phi->init_req(k, m);
1714               }
1715             }
1716           }
1717           if (phi != nullptr) {
1718             regions.map(c->_idx, phi);
1719           } else {
1720             assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1721           }
1722         }
1723         Node* current_region = regions[c->_idx];
1724         if (current_region != prev_region) {
1725           progress = true;
1726           if (prev_region == prev_mem) {
1727             _memory_nodes.map(c->_idx, current_region);
1728           }
1729         }
1730       } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
1731         Node* m = _memory_nodes[_phase->idom(c)->_idx];
1732         assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
1733         if (m != prev_mem) {
1734           _memory_nodes.map(c->_idx, m);
1735           progress = true;
1736         }
1737       }
1738 #ifdef ASSERT
1739       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
1740 #endif
1741     }
1742   }
1743 
1744   // Replace existing phi with computed memory state for that region
1745   // if different (could be a new phi or a dominating memory node if
1746   // that phi was found to be useless).
1747   while (dead_phis.size() > 0) {
1748     Node* n = dead_phis.pop();
1749     n->replace_by(_phase->C->top());
1750     n->destruct(&_phase->igvn());
1751   }
1752   for (int i = rpo_list.size() - 1; i >= 0; i--) {
1753     Node* c = rpo_list.at(i);
1754     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1755       Node* n = regions[c->_idx];
1756       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1757       if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
1758         _phase->register_new_node(n, c);
1759       }
1760     }
1761   }
1762   for (int i = rpo_list.size() - 1; i >= 0; i--) {
1763     Node* c = rpo_list.at(i);
1764     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1765       Node* n = regions[c->_idx];
1766       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1767       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
1768         Node* u = c->fast_out(i);
1769         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1770             u != n) {
1771           assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
1772           if (u->adr_type() == TypePtr::BOTTOM) {
1773             fix_memory_uses(u, n, n, c);
1774           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
1775             _phase->lazy_replace(u, n);
1776             --i; --imax;
1777           }
1778         }
1779       }
1780     }
1781   }
1782 }
1783 
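     // An infinite loop ends in a NeverBranch feeding Halt. Recover the memory state at its
     // head: prefer a memory phi already on the head, otherwise walk back from the loop's
     // backedges to a safepoint that carries the memory state.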
1784 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
1785   Node* mem = nullptr;
1786   Node* head = in->in(0);
1787   assert(head->is_Region(), "unexpected infinite loop graph shape");
1788 
1789   Node* phi_mem = nullptr;
1790   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
1791     Node* u = head->fast_out(j);
1792     if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
1793       if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
1794         assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
1795         phi_mem = u;
1796       } else if (u->adr_type() == TypePtr::BOTTOM) {
1797         assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
1798         if (phi_mem == nullptr) {
1799           phi_mem = u;
1800         }
1801       }
1802     }
1803   }
1804   if (phi_mem == nullptr) {
1805     ResourceMark rm;
1806     Node_Stack stack(0);
1807     stack.push(head, 1);
1808     do {
1809       Node* n = stack.node();
1810       uint i = stack.index();
1811       if (i >= n->req()) {
1812         stack.pop();
1813       } else {
1814         stack.set_index(i + 1);
1815         Node* c = n->in(i);
1816         assert(c != head, "should have found a safepoint on the way");
1817         if (stack.size() != 1 || _phase->is_dominator(head, c)) {
1818           for (;;) {
1819             if (c->is_Region()) {
1820               stack.push(c, 1);
1821               break;
1822             } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
1823               Node* m = c->in(TypeFunc::Memory);
1824               if (m->is_MergeMem()) {
1825                 m = m->as_MergeMem()->memory_at(_alias);
1826               }
1827               assert(mem == nullptr || mem == m, "several memory states");
1828               mem = m;
1829               break;
1830             } else {
1831               assert(c != c->in(0), "");
1832               c = c->in(0);
1833             }
1834           }
1835         }
1836       }
1837     } while (stack.size() > 0);
1838     assert(mem != nullptr, "should have found safepoint");
1839   } else {
1840     mem = phi_mem;
1841   }
1842   return mem;
1843 }
1844 
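     // Control for a memory node, with one adjustment: memory projections of a call that
     // has an exception path are reported at the matching catch projection, so fallthrough
     // and exception memory states are attributed to their respective control paths.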
1845 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
1846   Node* c = _phase->get_ctrl(n);
1847   if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
1848     assert(c == n->in(0), "");
1849     CallNode* call = c->as_Call();
1850     CallProjections projs;
1851     call->extract_projections(&projs, true, false);
1852     if (projs.catchall_memproj != nullptr) {
1853       if (projs.fallthrough_memproj == n) {
1854         c = projs.fallthrough_catchproj;
1855       } else {
1856         assert(projs.catchall_memproj == n, "");
1857         c = projs.catchall_catchproj;
1858       }
1859     }
1860   }
1861   return c;
1862 }
1863 
1864 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
1865   if (_phase->has_ctrl(n))
1866     return get_ctrl(n);
1867   else {
1868     assert (n->is_CFG(), "must be a CFG node");
1869     return n;
1870   }
1871 }
1872 
1873 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
1874   return m != nullptr && get_ctrl(m) == c;
1875 }
1876 
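     // Return the memory state for this alias that is live at ctrl: start from the state
     // recorded for ctrl and walk up the dominator tree (and, when a consumer n is given,
     // down the local memory chain) until a state valid for that control is found.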
1877 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
1878   assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
1879   assert(!ctrl->is_Call() || ctrl == n, "projection expected");
1880 #ifdef ASSERT
1881   if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
1882       (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
1883     CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
1884     int mems = 0;
1885     for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
1886       Node* u = call->fast_out(i);
1887       if (u->bottom_type() == Type::MEMORY) {
1888         mems++;
1889       }
1890     }
1891     assert(mems <= 1, "No node right after call if multiple mem projections");
1892   }
1893 #endif
1894   Node* mem = _memory_nodes[ctrl->_idx];
1895   Node* c = ctrl;
1896   while (!mem_is_valid(mem, c) &&
1897          (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
1898     c = _phase->idom(c);
1899     mem = _memory_nodes[c->_idx];
1900   }
1901   if (n != nullptr && mem_is_valid(mem, c)) {
1902     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
1903       mem = next_mem(mem, _alias);
1904     }
1905     if (mem->is_MergeMem()) {
1906       mem = mem->as_MergeMem()->memory_at(_alias);
1907     }
1908     if (!mem_is_valid(mem, c)) {
1909       do {
1910         c = _phase->idom(c);
1911         mem = _memory_nodes[c->_idx];
1912       } while (!mem_is_valid(mem, c) &&
1913                (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
1914     }
1915   }
1916   assert(mem->bottom_type() == Type::MEMORY, "");
1917   return mem;
1918 }
1919 
1920 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
1921   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1922     Node* use = region->fast_out(i);
1923     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
1924         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
1925       return true;
1926     }
1927   }
1928   return false;
1929 }
1930 
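     // Rewire raw-memory users after new_mem has been introduced at new_ctrl: either splice
     // new_mem into the private memory chain of ctrl, or walk the CFG below new_ctrl creating
     // memory phis at merge points, then re-point loads, stores, MergeMems and memory phis
     // at the state that now reaches them.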
1931 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
1932   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
1933   const bool trace = false;
1934   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
1935   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
1936   GrowableArray<Node*> phis;
1937   if (mem_for_ctrl != mem) {
1938     Node* old = mem_for_ctrl;
1939     Node* prev = nullptr;
1940     while (old != mem) {
1941       prev = old;
1942       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
1943         assert(_alias == Compile::AliasIdxRaw, "");
1944         old = old->in(MemNode::Memory);
1945       } else if (old->Opcode() == Op_SCMemProj) {
1946         assert(_alias == Compile::AliasIdxRaw, "");
1947         old = old->in(0);
1948       } else {
1949         ShouldNotReachHere();
1950       }
1951     }
1952     assert(prev != nullptr, "");
1953     if (new_ctrl != ctrl) {
1954       _memory_nodes.map(ctrl->_idx, mem);
1955       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
1956     }
1957     uint input = (uint)MemNode::Memory;
1958     _phase->igvn().replace_input_of(prev, input, new_mem);
1959   } else {
1960     uses.clear();
1961     _memory_nodes.map(new_ctrl->_idx, new_mem);
1962     uses.push(new_ctrl);
1963     for(uint next = 0; next < uses.size(); next++ ) {
1964       Node *n = uses.at(next);
1965       assert(n->is_CFG(), "");
1966       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
1967       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1968         Node* u = n->fast_out(i);
1969         if (!u->is_Root() && u->is_CFG() && u != n) {
1970           Node* m = _memory_nodes[u->_idx];
1971           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
1972               !has_mem_phi(u) &&
1973               u->unique_ctrl_out()->Opcode() != Op_Halt) {
1974             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
1975             DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
1976 
1977             if (!mem_is_valid(m, u) || !m->is_Phi()) {
1978               bool push = true;
1979               bool create_phi = true;
1980               if (_phase->is_dominator(new_ctrl, u)) {
1981                 create_phi = false;
1982               }
1983               if (create_phi) {
1984                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
1985                 _phase->register_new_node(phi, u);
1986                 phis.push(phi);
1987                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
1988                 if (!mem_is_valid(m, u)) {
1989                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
1990                   _memory_nodes.map(u->_idx, phi);
1991                 } else {
1992                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
1993                   for (;;) {
1994                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
1995                     Node* next = nullptr;
1996                     if (m->is_Proj()) {
1997                       next = m->in(0);
1998                     } else {
1999                       assert(m->is_Mem() || m->is_LoadStore(), "");
2000                       assert(_alias == Compile::AliasIdxRaw, "");
2001                       next = m->in(MemNode::Memory);
2002                     }
2003                     if (_phase->get_ctrl(next) != u) {
2004                       break;
2005                     }
2006                     if (next->is_MergeMem()) {
2007                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2008                       break;
2009                     }
2010                     if (next->is_Phi()) {
2011                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2012                       break;
2013                     }
2014                     m = next;
2015                   }
2016 
2017                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2018                   assert(m->is_Mem() || m->is_LoadStore(), "");
2019                   uint input = (uint)MemNode::Memory;
2020                   _phase->igvn().replace_input_of(m, input, phi);
2021                   push = false;
2022                 }
2023               } else {
2024                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2025               }
2026               if (push) {
2027                 uses.push(u);
2028               }
2029             }
2030           } else if (!mem_is_valid(m, u) &&
2031                      !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
2032             uses.push(u);
2033           }
2034         }
2035       }
2036     }
2037     for (int i = 0; i < phis.length(); i++) {
2038       Node* n = phis.at(i);
2039       Node* r = n->in(0);
2040       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2041       for (uint j = 1; j < n->req(); j++) {
2042         Node* m = find_mem(r->in(j), nullptr);
2043         _phase->igvn().replace_input_of(n, j, m);
2044         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2045       }
2046     }
2047   }
2048   uint last = _phase->C->unique();
2049   MergeMemNode* mm = nullptr;
2050   int alias = _alias;
2051   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2052   // Process loads first so we do not miss an anti-dependency: if the memory
2053   // edge of a store were updated before a load is processed, the
2054   // anti-dependency could be missed.
2055   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2056     Node* u = mem->out(i);
2057     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2058       Node* m = find_mem(_phase->get_ctrl(u), u);
2059       if (m != mem) {
2060         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2061         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2062         --i;
2063       }
2064     }
2065   }
2066   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2067     Node* u = mem->out(i);
2068     if (u->_idx < last) {
2069       if (u->is_Mem()) {
2070         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2071           Node* m = find_mem(_phase->get_ctrl(u), u);
2072           if (m != mem) {
2073             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2074             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2075             --i;
2076           }
2077         }
2078       } else if (u->is_MergeMem()) {
2079         MergeMemNode* u_mm = u->as_MergeMem();
2080         if (u_mm->memory_at(alias) == mem) {
2081           MergeMemNode* newmm = nullptr;
2082           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2083             Node* uu = u->fast_out(j);
2084             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2085             if (uu->is_Phi()) {
2086               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2087               Node* region = uu->in(0);
2088               int nb = 0;
2089               for (uint k = 1; k < uu->req(); k++) {
2090                 if (uu->in(k) == u) {
2091                   Node* m = find_mem(region->in(k), nullptr);
2092                   if (m != mem) {
2093                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2094                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2095                     if (newmm != u) {
2096                       _phase->igvn().replace_input_of(uu, k, newmm);
2097                       nb++;
2098                       --jmax;
2099                     }
2100                   }
2101                 }
2102               }
2103               if (nb > 0) {
2104                 --j;
2105               }
2106             } else {
2107               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2108               if (m != mem) {
2109                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2110                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2111                 if (newmm != u) {
2112                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2113                   --j, --jmax;
2114                 }
2115               }
2116             }
2117           }
2118         }
2119       } else if (u->is_Phi()) {
2120         assert(u->bottom_type() == Type::MEMORY, "what else?");
2121         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2122           Node* region = u->in(0);
2123           bool replaced = false;
2124           for (uint j = 1; j < u->req(); j++) {
2125             if (u->in(j) == mem) {
2126               Node* m = find_mem(region->in(j), nullptr);
2127               Node* nnew = m;
2128               if (m != mem) {
2129                 if (u->adr_type() == TypePtr::BOTTOM) {
2130                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2131                   nnew = mm;
2132                 }
2133                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2134                 _phase->igvn().replace_input_of(u, j, nnew);
2135                 replaced = true;
2136               }
2137             }
2138           }
2139           if (replaced) {
2140             --i;
2141           }
2142         }
2143       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2144                  u->adr_type() == nullptr) {
2145         assert(u->adr_type() != nullptr ||
2146                u->Opcode() == Op_Rethrow ||
2147                u->Opcode() == Op_Return ||
2148                u->Opcode() == Op_SafePoint ||
2149                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2150                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2151                u->Opcode() == Op_CallLeaf, "");
2152         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2153         if (m != mem) {
2154           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2155           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2156           --i;
2157         }
2158       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2159         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2160         if (m != mem) {
2161           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2162           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2163           --i;
2164         }
2165       } else if (u->adr_type() != TypePtr::BOTTOM &&
2166                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2167         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2168         assert(m != mem, "");
2169         // u is on the wrong slice...
2170         assert(u->is_ClearArray(), "");
2171         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2172         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2173         --i;
2174       }
2175     }
2176   }
2177 #ifdef ASSERT
2178   assert(new_mem->outcnt() > 0, "");
2179   for (int i = 0; i < phis.length(); i++) {
2180     Node* n = phis.at(i);
2181     assert(n->outcnt() > 0, "new phi must have uses now");
2182   }
2183 #endif
2184 }
2185 
2186 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2187   if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2188     _memory_nodes.map(ctrl->_idx, mem);
2189     _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2190   }
2191 }
2192 
2193 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2194   MergeMemNode* mm = MergeMemNode::make(mem);
2195   mm->set_memory_at(_alias, rep_proj);
2196   _phase->register_new_node(mm, rep_ctrl);
2197   return mm;
2198 }
2199 
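     // Produce a MergeMem identical to u except that the slice for this alias is rep_proj.
     // When u has a single use it is updated in place; otherwise a fresh MergeMem is built
     // edge by edge so the caller's DUIterator is not disturbed.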
2200 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2201   MergeMemNode* newmm = nullptr;
2202   MergeMemNode* u_mm = u->as_MergeMem();
2203   Node* c = _phase->get_ctrl(u);
2204   if (_phase->is_dominator(c, rep_ctrl)) {
2205     c = rep_ctrl;
2206   } else {
2207     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2208   }
2209   if (u->outcnt() == 1) {
2210     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2211       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2212       --i;
2213     } else {
2214       _phase->igvn().rehash_node_delayed(u);
2215       u_mm->set_memory_at(_alias, rep_proj);
2216     }
2217     newmm = u_mm;
2218     _phase->set_ctrl_and_loop(u, c);
2219   } else {
2220     // We can't simply clone u and then change one of its inputs, because
2221     // that would add and then remove an edge, which messes up the
2222     // DUIterator.
2223     newmm = MergeMemNode::make(u_mm->base_memory());
2224     for (uint j = 0; j < u->req(); j++) {
2225       if (j < newmm->req()) {
2226         if (j == (uint)_alias) {
2227           newmm->set_req(j, rep_proj);
2228         } else if (newmm->in(j) != u->in(j)) {
2229           newmm->set_req(j, u->in(j));
2230         }
2231       } else if (j == (uint)_alias) {
2232         newmm->add_req(rep_proj);
2233       } else {
2234         newmm->add_req(u->in(j));
2235       }
2236     }
2237     if ((uint)_alias >= u->req()) {
2238       newmm->set_memory_at(_alias, rep_proj);
2239     }
2240     _phase->register_new_node(newmm, c);
2241   }
2242   return newmm;
2243 }
2244 
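     // A phi on the bottom memory slice stands in for this alias only if its region has no
     // phi dedicated to the alias; otherwise the dedicated phi is the one to update.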
2245 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2246   if (phi->adr_type() == TypePtr::BOTTOM) {
2247     Node* region = phi->in(0);
2248     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2249       Node* uu = region->fast_out(j);
2250       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2251         return false;
2252       }
2253     }
2254     return true;
2255   }
2256   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2257 }
2258 
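     // Redirect users of mem that are dominated by rep_ctrl so they consume rep_proj
     // instead (wrapped in a MergeMem for users of the full memory state).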
2259 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2260   uint last = _phase-> C->unique();
2261   MergeMemNode* mm = nullptr;
2262   assert(mem->bottom_type() == Type::MEMORY, "");
2263   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2264     Node* u = mem->out(i);
2265     if (u != replacement && u->_idx < last) {
2266       if (u->is_MergeMem()) {
2267         MergeMemNode* u_mm = u->as_MergeMem();
2268         if (u_mm->memory_at(_alias) == mem) {
2269           MergeMemNode* newmm = nullptr;
2270           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2271             Node* uu = u->fast_out(j);
2272             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2273             if (uu->is_Phi()) {
2274               if (should_process_phi(uu)) {
2275                 Node* region = uu->in(0);
2276                 int nb = 0;
2277                 for (uint k = 1; k < uu->req(); k++) {
2278                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2279                     if (newmm == nullptr) {
2280                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2281                     }
2282                     if (newmm != u) {
2283                       _phase->igvn().replace_input_of(uu, k, newmm);
2284                       nb++;
2285                       --jmax;
2286                     }
2287                   }
2288                 }
2289                 if (nb > 0) {
2290                   --j;
2291                 }
2292               }
2293             } else {
2294               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2295                 if (newmm == nullptr) {
2296                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2297                 }
2298                 if (newmm != u) {
2299                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2300                   --j, --jmax;
2301                 }
2302               }
2303             }
2304           }
2305         }
2306       } else if (u->is_Phi()) {
2307         assert(u->bottom_type() == Type::MEMORY, "what else?");
2308         Node* region = u->in(0);
2309         if (should_process_phi(u)) {
2310           bool replaced = false;
2311           for (uint j = 1; j < u->req(); j++) {
2312             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2313               Node* nnew = rep_proj;
2314               if (u->adr_type() == TypePtr::BOTTOM) {
2315                 if (mm == nullptr) {
2316                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2317                 }
2318                 nnew = mm;
2319               }
2320               _phase->igvn().replace_input_of(u, j, nnew);
2321               replaced = true;
2322             }
2323           }
2324           if (replaced) {
2325             --i;
2326           }
2327 
2328         }
2329       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2330                  u->adr_type() == nullptr) {
2331         assert(u->adr_type() != nullptr ||
2332                u->Opcode() == Op_Rethrow ||
2333                u->Opcode() == Op_Return ||
2334                u->Opcode() == Op_SafePoint ||
2335                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2336                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2337                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2338         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2339           if (mm == nullptr) {
2340             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2341           }
2342           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2343           --i;
2344         }
2345       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2346         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2347           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2348           --i;
2349         }
2350       }
2351     }
2352   }
2353 }
2354 
2355 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
2356 : Node(ctrl, obj), _decorators(decorators) {
2357   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2358 }
2359 
2360 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
2361   return _decorators;
2362 }
2363 
2364 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2365   return sizeof(*this);
2366 }
2367 
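     // Only decorator bits that affect barrier semantics take part in hashing and equality,
     // so GVN can common barriers that differ only in irrelevant decorators.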
2368 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2369   return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2370 }
2371 
2372 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2373   uint hash = Node::hash();
2374   hash += mask_decorators(_decorators);
2375   return hash;
2376 }
2377 
2378 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
2379   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2380          mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2381 }
2382 
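     // Weak and phantom accesses may return null even for a non-null input (the referent
     // may have been cleared concurrently), so their type must include NULL_PTR; strong
     // accesses preserve the input type.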
2383 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2384   if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2385     return Type::TOP;
2386   }
2387   const Type* t = in(ValueIn)->bottom_type();
2388   if (t == TypePtr::NULL_PTR) {
2389     return t;
2390   }
2391 
2392   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2393     return t;
2394   }
2395 
2396   return t->meet(TypePtr::NULL_PTR);
2397 }
2398 
2399 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2400   // Either input is TOP ==> the result is TOP
2401   const Type *t2 = phase->type(in(ValueIn));
2402   if( t2 == Type::TOP ) return Type::TOP;
2403 
2404   if (t2 == TypePtr::NULL_PTR) {
2405     return t2;
2406   }
2407 
2408   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2409     return t2;
2410   }
2411 
2412   return t2->meet(TypePtr::NULL_PTR);
2413 }
2414 
2415 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2416   Node* value = in(ValueIn);
2417   if (!needs_barrier(phase, value)) {
2418     return value;
2419   }
2420   return this;
2421 }
2422 
2423 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2424   Unique_Node_List visited;
2425   return needs_barrier_impl(phase, n, visited);
2426 }
2427 
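     // Conservatively decide whether the value may be a from-space reference. Values that
     // are provably null, constant, freshly allocated, produced by a call, or already
     // barriered never need another load-reference barrier.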
2428 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2429   if (n == nullptr) return false;
2430   if (visited.member(n)) {
2431     return false; // Been there.
2432   }
2433   visited.push(n);
2434 
2435   if (n->is_Allocate()) {
2436     // tty->print_cr("optimize barrier on alloc");
2437     return false;
2438   }
2439   if (n->is_Call()) {
2440     // tty->print_cr("optimize barrier on call");
2441     return false;
2442   }
2443 
2444   const Type* type = phase->type(n);
2445   if (type == Type::TOP) {
2446     return false;
2447   }
2448   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2449     // tty->print_cr("optimize barrier on null");
2450     return false;
2451   }
2452   if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
2453     // tty->print_cr("optimize barrier on constant");
2454     return false;
2455   }
2456 
2457   switch (n->Opcode()) {
2458     case Op_AddP:
2459       return true; // TODO: Can refine?
2460     case Op_LoadP:
2461     case Op_ShenandoahCompareAndExchangeN:
2462     case Op_ShenandoahCompareAndExchangeP:
2463     case Op_CompareAndExchangeN:
2464     case Op_CompareAndExchangeP:
2465     case Op_GetAndSetN:
2466     case Op_GetAndSetP:
2467       return true;
2468     case Op_Phi: {
2469       for (uint i = 1; i < n->req(); i++) {
2470         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2471       }
2472       return false;
2473     }
2474     case Op_CheckCastPP:
2475     case Op_CastPP:
2476       return needs_barrier_impl(phase, n->in(1), visited);
2477     case Op_Proj:
2478       return needs_barrier_impl(phase, n->in(0), visited);
2479     case Op_ShenandoahLoadReferenceBarrier:
2480       // tty->print_cr("optimize barrier on barrier");
2481       return false;
2482     case Op_Parm:
2483       // tty->print_cr("optimize barrier on input arg");
2484       return false;
2485     case Op_DecodeN:
2486     case Op_EncodeP:
2487       return needs_barrier_impl(phase, n->in(1), visited);
2488     case Op_LoadN:
2489       return true;
2490     case Op_CMoveN:
2491     case Op_CMoveP:
2492       return needs_barrier_impl(phase, n->in(2), visited) ||
2493              needs_barrier_impl(phase, n->in(3), visited);
2494     case Op_CreateEx:
2495       return false;
2496     default:
2497       break;
2498   }
2499 #ifdef ASSERT
2500   tty->print("need barrier on?: ");
2501   tty->print_cr("ins:");
2502   n->dump(2);
2503   tty->print_cr("outs:");
2504   n->dump(-2);
2505   ShouldNotReachHere();
2506 #endif
2507   return true;
2508 }