1 /*
   2  * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  30 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  32 #include "gc/shenandoah/shenandoahForwarding.hpp"
  33 #include "gc/shenandoah/shenandoahHeap.hpp"
  34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  35 #include "gc/shenandoah/shenandoahRuntime.hpp"
  36 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/callnode.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 
  47 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  48   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  49   if ((state->iu_barriers_count() +
  50        state->load_reference_barriers_count()) > 0) {
  51     assert(C->post_loop_opts_phase(), "no loop opts allowed");
  52     C->reset_post_loop_opts_phase(); // ... but we know what we are doing
  53     C->clear_major_progress();
  54     PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
  55     if (C->failing()) return false;
  56 
  57     C->set_major_progress();
  58     if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
  59       return false;
  60     }
  61     C->clear_major_progress();
  62     C->process_for_post_loop_opts_igvn(igvn);
  63     if (C->failing()) return false;
  64 
  65     C->set_post_loop_opts_phase(); // now for real!
  66   }
  67   return true;
  68 }
  69 
  70 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  71   if (!UseShenandoahGC) {
  72     return false;
  73   }
  74   assert(iff->is_If(), "bad input");
  75   if (iff->Opcode() != Op_If) {
  76     return false;
  77   }
  78   Node* bol = iff->in(1);
  79   if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
  80     return false;
  81   }
  82   Node* cmp = bol->in(1);
  83   if (cmp->Opcode() != Op_CmpI) {
  84     return false;
  85   }
  86   Node* in1 = cmp->in(1);
  87   Node* in2 = cmp->in(2);
  88   if (in2->find_int_con(-1) != 0) {
  89     return false;
  90   }
  91   if (in1->Opcode() != Op_AndI) {
  92     return false;
  93   }
  94   in2 = in1->in(2);
  95   if (in2->find_int_con(-1) != mask) {
  96     return false;
  97   }
  98   in1 = in1->in(1);
  99 
 100   return is_gc_state_load(in1);
 101 }
 102 
 103 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
 104   return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
 105 }
 106 
 107 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
 108   if (!UseShenandoahGC) {
 109     return false;
 110   }
 111   if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
 112     return false;
 113   }
 114   Node* addp = n->in(MemNode::Address);
 115   if (!addp->is_AddP()) {
 116     return false;
 117   }
 118   Node* base = addp->in(AddPNode::Address);
 119   Node* off = addp->in(AddPNode::Offset);
 120   if (base->Opcode() != Op_ThreadLocal) {
 121     return false;
 122   }
 123   if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
 124     return false;
 125   }
 126   return true;
 127 }
 128 
 129 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
 130   assert(phase->is_dominator(stop, start), "bad inputs");
 131   ResourceMark rm;
 132   Unique_Node_List wq;
 133   wq.push(start);
 134   for (uint next = 0; next < wq.size(); next++) {
 135     Node *m = wq.at(next);
 136     if (m == stop) {
 137       continue;
 138     }
 139     if (m->is_SafePoint() && !m->is_CallLeaf()) {
 140       return true;
 141     }
 142     if (m->is_Region()) {
 143       for (uint i = 1; i < m->req(); i++) {
 144         wq.push(m->in(i));
 145       }
 146     } else {
 147       wq.push(m->in(0));
 148     }
 149   }
 150   return false;
 151 }
 152 
 153 #ifdef ASSERT
 154 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
 155   assert(phis.size() == 0, "");
 156 
 157   while (true) {
 158     if (in->bottom_type() == TypePtr::NULL_PTR) {
 159       if (trace) {tty->print_cr("null");}
 160     } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
 161       if (trace) {tty->print_cr("Non oop");}
 162     } else {
 163       if (in->is_ConstraintCast()) {
 164         in = in->in(1);
 165         continue;
 166       } else if (in->is_AddP()) {
 167         assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
 168         in = in->in(AddPNode::Address);
 169         continue;
 170       } else if (in->is_Con()) {
 171         if (trace) {
 172           tty->print("Found constant");
 173           in->dump();
 174         }
 175       } else if (in->Opcode() == Op_Parm) {
 176         if (trace) {
 177           tty->print("Found argument");
 178         }
 179       } else if (in->Opcode() == Op_CreateEx) {
 180         if (trace) {
 181           tty->print("Found create-exception");
 182         }
 183       } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
 184         if (trace) {
 185           tty->print("Found raw LoadP (OSR argument?)");
 186         }
 187       } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 188         if (t == ShenandoahOopStore) {
 189           uint i = 0;
 190           for (; i < phis.size(); i++) {
 191             Node* n = phis.node_at(i);
 192             if (n->Opcode() == Op_ShenandoahIUBarrier) {
 193               break;
 194             }
 195           }
 196           if (i == phis.size()) {
 197             return false;
 198           }
 199         }
 200         barriers_used.push(in);
 201         if (trace) {tty->print("Found barrier"); in->dump();}
 202       } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
 203         if (t != ShenandoahOopStore) {
 204           in = in->in(1);
 205           continue;
 206         }
 207         if (trace) {tty->print("Found enqueue barrier"); in->dump();}
 208         phis.push(in, in->req());
 209         in = in->in(1);
 210         continue;
 211       } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
 212         if (trace) {
 213           tty->print("Found alloc");
 214           in->in(0)->dump();
 215         }
 216       } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
 217         if (trace) {
 218           tty->print("Found Java call");
 219         }
 220       } else if (in->is_Phi()) {
 221         if (!visited.test_set(in->_idx)) {
 222           if (trace) {tty->print("Pushed phi:"); in->dump();}
 223           phis.push(in, 2);
 224           in = in->in(1);
 225           continue;
 226         }
 227         if (trace) {tty->print("Already seen phi:"); in->dump();}
 228       } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
 229         if (!visited.test_set(in->_idx)) {
 230           if (trace) {tty->print("Pushed cmovep:"); in->dump();}
 231           phis.push(in, CMoveNode::IfTrue);
 232           in = in->in(CMoveNode::IfFalse);
 233           continue;
 234         }
 235         if (trace) {tty->print("Already seen cmovep:"); in->dump();}
 236       } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
 237         in = in->in(1);
 238         continue;
 239       } else {
 240         return false;
 241       }
 242     }
 243     bool cont = false;
 244     while (phis.is_nonempty()) {
 245       uint idx = phis.index();
 246       Node* phi = phis.node();
 247       if (idx >= phi->req()) {
 248         if (trace) {tty->print("Popped phi:"); phi->dump();}
 249         phis.pop();
 250         continue;
 251       }
 252       if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
 253       in = phi->in(idx);
 254       phis.set_index(idx+1);
 255       cont = true;
 256       break;
 257     }
 258     if (!cont) {
 259       break;
 260     }
 261   }
 262   return true;
 263 }
 264 
 265 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
 266   if (n1 != nullptr) {
 267     n1->dump(+10);
 268   }
 269   if (n2 != nullptr) {
 270     n2->dump(+10);
 271   }
 272   fatal("%s", msg);
 273 }
 274 
 275 void ShenandoahBarrierC2Support::verify(RootNode* root) {
 276   ResourceMark rm;
 277   Unique_Node_List wq;
 278   GrowableArray<Node*> barriers;
 279   Unique_Node_List barriers_used;
 280   Node_Stack phis(0);
 281   VectorSet visited;
 282   const bool trace = false;
 283   const bool verify_no_useless_barrier = false;
 284 
 285   wq.push(root);
 286   for (uint next = 0; next < wq.size(); next++) {
 287     Node *n = wq.at(next);
 288     if (n->is_Load()) {
 289       const bool trace = false;
 290       if (trace) {tty->print("Verifying"); n->dump();}
 291       if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
 292         if (trace) {tty->print_cr("Load range/klass");}
 293       } else {
 294         const TypePtr* adr_type = n->as_Load()->adr_type();
 295 
 296         if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
 297           if (trace) {tty->print_cr("Mark load");}
 298         } else if (adr_type->isa_instptr() &&
 299                    adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
 300                    adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
 301           if (trace) {tty->print_cr("Reference.get()");}
 302         } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 303           report_verify_failure("Shenandoah verification: Load should have barriers", n);
 304         }
 305       }
 306     } else if (n->is_Store()) {
 307       const bool trace = false;
 308 
 309       if (trace) {tty->print("Verifying"); n->dump();}
 310       if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
 311         Node* adr = n->in(MemNode::Address);
 312         bool verify = true;
 313 
 314         if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
 315           adr = adr->in(AddPNode::Address);
 316           if (adr->is_AddP()) {
 317             assert(adr->in(AddPNode::Base)->is_top(), "");
 318             adr = adr->in(AddPNode::Address);
 319             if (adr->Opcode() == Op_LoadP &&
 320                 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
 321                 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 322                 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
 323               if (trace) {tty->print_cr("SATB prebarrier");}
 324               verify = false;
 325             }
 326           }
 327         }
 328 
 329         if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 330           report_verify_failure("Shenandoah verification: Store should have barriers", n);
 331         }
 332       }
 333       if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 334         report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
 335       }
 336     } else if (n->Opcode() == Op_CmpP) {
 337       const bool trace = false;
 338 
 339       Node* in1 = n->in(1);
 340       Node* in2 = n->in(2);
 341       if (in1->bottom_type()->isa_oopptr()) {
 342         if (trace) {tty->print("Verifying"); n->dump();}
 343 
 344         bool mark_inputs = false;
 345         if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
 346             (in1->is_Con() || in2->is_Con())) {
 347           if (trace) {tty->print_cr("Comparison against a constant");}
 348           mark_inputs = true;
 349         } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
 350                    (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
 351           if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
 352           mark_inputs = true;
 353         } else {
 354           assert(in2->bottom_type()->isa_oopptr(), "");
 355 
 356           if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
 357               !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
 358             report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
 359           }
 360         }
 361         if (verify_no_useless_barrier &&
 362             mark_inputs &&
 363             (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
 364              !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
 365           phis.clear();
 366           visited.reset();
 367         }
 368       }
 369     } else if (n->is_LoadStore()) {
 370       if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
 371           !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
 372         report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
 373       }
 374 
 375       if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
 376         report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
 377       }
 378     } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
 379       CallNode* call = n->as_Call();
 380 
 381       static struct {
 382         const char* name;
 383         struct {
 384           int pos;
 385           verify_type t;
 386         } args[6];
 387       } calls[] = {
 388         "array_partition_stub",
 389         { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore },   { -1, ShenandoahNone },
 390           { -1, ShenandoahNone },                { -1, ShenandoahNone },                  { -1, ShenandoahNone } },
 391         "arraysort_stub",
 392         { { TypeFunc::Parms, ShenandoahStore },  { -1, ShenandoahNone },                  { -1, ShenandoahNone },
 393           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 394         "aescrypt_encryptBlock",
 395         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 396           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 397         "aescrypt_decryptBlock",
 398         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 399           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 400         "multiplyToLen",
 401         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
 402           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 403         "squareToLen",
 404         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
 405           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 406         "montgomery_multiply",
 407         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 408           { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 409         "montgomery_square",
 410         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
 411           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 412         "mulAdd",
 413         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 414           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 415         "vectorizedMismatch",
 416         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
 417           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 418         "updateBytesCRC32",
 419         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 420           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 421         "updateBytesAdler32",
 422         { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 423           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 424         "updateBytesCRC32C",
 425         { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
 426           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 427         "counterMode_AESCrypt",
 428         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 429           { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
 430         "cipherBlockChaining_encryptAESCrypt",
 431         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 432           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 433         "cipherBlockChaining_decryptAESCrypt",
 434         { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
 435           { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 436         "shenandoah_clone_barrier",
 437         { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
 438           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 439         "ghash_processBlocks",
 440         { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
 441           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 442         "sha1_implCompress",
 443         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 444           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 445         "sha256_implCompress",
 446         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 447           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 448         "sha512_implCompress",
 449         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 450           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 451         "sha1_implCompressMB",
 452         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 453           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 454         "sha256_implCompressMB",
 455         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 456           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 457         "sha512_implCompressMB",
 458         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
 459           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 460         "encodeBlock",
 461         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 462           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 463         "decodeBlock",
 464         { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
 465           { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
 466       };
 467 
 468       if (call->is_call_to_arraycopystub()) {
 469         Node* dest = nullptr;
 470         const TypeTuple* args = n->as_Call()->_tf->domain();
 471         for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 472           if (args->field_at(i)->isa_ptr()) {
 473             j++;
 474             if (j == 2) {
 475               dest = n->in(i);
 476               break;
 477             }
 478           }
 479         }
 480         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
 481             !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
 482           report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
 483         }
 484       } else if (strlen(call->_name) > 5 &&
 485                  !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
 486         if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
 487           report_verify_failure("Shenandoah verification: _fill should have barriers", n);
 488         }
 489       } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
 490         // skip
 491       } else {
 492         const int calls_len = sizeof(calls) / sizeof(calls[0]);
 493         int i = 0;
 494         for (; i < calls_len; i++) {
 495           if (!strcmp(calls[i].name, call->_name)) {
 496             break;
 497           }
 498         }
 499         if (i != calls_len) {
 500           const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
 501           for (uint j = 0; j < args_len; j++) {
 502             int pos = calls[i].args[j].pos;
 503             if (pos == -1) {
 504               break;
 505             }
 506             if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
 507               report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 508             }
 509           }
 510           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 511             if (call->in(j)->bottom_type()->make_ptr() &&
 512                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 513               uint k = 0;
 514               for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
 515               if (k == args_len) {
 516                 fatal("arg %d for call %s not covered", j, call->_name);
 517               }
 518             }
 519           }
 520         } else {
 521           for (uint j = TypeFunc::Parms; j < call->req(); j++) {
 522             if (call->in(j)->bottom_type()->make_ptr() &&
 523                 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
 524               fatal("%s not covered", call->_name);
 525             }
 526           }
 527         }
 528       }
 529     } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
 530       // skip
 531     } else if (n->is_AddP()
 532                || n->is_Phi()
 533                || n->is_ConstraintCast()
 534                || n->Opcode() == Op_Return
 535                || n->Opcode() == Op_CMoveP
 536                || n->Opcode() == Op_CMoveN
 537                || n->Opcode() == Op_Rethrow
 538                || n->is_MemBar()
 539                || n->Opcode() == Op_Conv2B
 540                || n->Opcode() == Op_SafePoint
 541                || n->is_CallJava()
 542                || n->Opcode() == Op_Unlock
 543                || n->Opcode() == Op_EncodeP
 544                || n->Opcode() == Op_DecodeN) {
 545       // nothing to do
 546     } else {
 547       static struct {
 548         int opcode;
 549         struct {
 550           int pos;
 551           verify_type t;
 552         } inputs[2];
 553       } others[] = {
 554         Op_FastLock,
 555         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 556         Op_Lock,
 557         { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
 558         Op_ArrayCopy,
 559         { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
 560         Op_StrCompressedCopy,
 561         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 562         Op_StrInflatedCopy,
 563         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 564         Op_AryEq,
 565         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 566         Op_StrIndexOf,
 567         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 568         Op_StrComp,
 569         { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
 570         Op_StrEquals,
 571         { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
 572         Op_VectorizedHashCode,
 573         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 574         Op_EncodeISOArray,
 575         { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
 576         Op_CountPositives,
 577         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
 578         Op_CastP2X,
 579         { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
 580         Op_StrIndexOfChar,
 581         { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
 582       };
 583 
 584       const int others_len = sizeof(others) / sizeof(others[0]);
 585       int i = 0;
 586       for (; i < others_len; i++) {
 587         if (others[i].opcode == n->Opcode()) {
 588           break;
 589         }
 590       }
 591       uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
 592       if (i != others_len) {
 593         const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
 594         for (uint j = 0; j < inputs_len; j++) {
 595           int pos = others[i].inputs[j].pos;
 596           if (pos == -1) {
 597             break;
 598           }
 599           if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
 600             report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
 601           }
 602         }
 603         for (uint j = 1; j < stop; j++) {
 604           if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
 605               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 606             uint k = 0;
 607             for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
 608             if (k == inputs_len) {
 609               fatal("arg %d for node %s not covered", j, n->Name());
 610             }
 611           }
 612         }
 613       } else {
 614         for (uint j = 1; j < stop; j++) {
 615           if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
 616               n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
 617             fatal("%s not covered", n->Name());
 618           }
 619         }
 620       }
 621     }
 622 
 623     if (n->is_SafePoint()) {
 624       SafePointNode* sfpt = n->as_SafePoint();
 625       if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
 626         for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
 627           if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
 628             phis.clear();
 629             visited.reset();
 630           }
 631         }
 632       }
 633     }
 634   }
 635 
 636   if (verify_no_useless_barrier) {
 637     for (int i = 0; i < barriers.length(); i++) {
 638       Node* n = barriers.at(i);
 639       if (!barriers_used.member(n)) {
 640         tty->print("XXX useless barrier"); n->dump(-2);
 641         ShouldNotReachHere();
 642       }
 643     }
 644   }
 645 }
 646 #endif
 647 
 648 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
 649   // That both nodes have the same control is not sufficient to prove
 650   // domination, verify that there's no path from d to n
 651   ResourceMark rm;
 652   Unique_Node_List wq;
 653   wq.push(d);
 654   for (uint next = 0; next < wq.size(); next++) {
 655     Node *m = wq.at(next);
 656     if (m == n) {
 657       return false;
 658     }
 659     if (m->is_Phi() && m->in(0)->is_Loop()) {
 660       assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
 661     } else {
 662       if (m->is_Store() || m->is_LoadStore()) {
 663         // Take anti-dependencies into account
 664         Node* mem = m->in(MemNode::Memory);
 665         for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 666           Node* u = mem->fast_out(i);
 667           if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
 668               phase->ctrl_or_self(u) == c) {
 669             wq.push(u);
 670           }
 671         }
 672       }
 673       for (uint i = 0; i < m->req(); i++) {
 674         if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
 675           wq.push(m->in(i));
 676         }
 677       }
 678     }
 679   }
 680   return true;
 681 }
 682 
 683 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
 684   if (d_c != n_c) {
 685     return phase->is_dominator(d_c, n_c);
 686   }
 687   return is_dominator_same_ctrl(d_c, d, n, phase);
 688 }
 689 
 690 Node* next_mem(Node* mem, int alias) {
 691   Node* res = nullptr;
 692   if (mem->is_Proj()) {
 693     res = mem->in(0);
 694   } else if (mem->is_SafePoint() || mem->is_MemBar()) {
 695     res = mem->in(TypeFunc::Memory);
 696   } else if (mem->is_Phi()) {
 697     res = mem->in(1);
 698   } else if (mem->is_MergeMem()) {
 699     res = mem->as_MergeMem()->memory_at(alias);
 700   } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
 701     assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
 702     res = mem->in(MemNode::Memory);
 703   } else {
 704 #ifdef ASSERT
 705     mem->dump();
 706 #endif
 707     ShouldNotReachHere();
 708   }
 709   return res;
 710 }
 711 
 712 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
 713   Node* iffproj = nullptr;
 714   while (c != dom) {
 715     Node* next = phase->idom(c);
 716     assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
 717     if (c->is_Region()) {
 718       ResourceMark rm;
 719       Unique_Node_List wq;
 720       wq.push(c);
 721       for (uint i = 0; i < wq.size(); i++) {
 722         Node *n = wq.at(i);
 723         if (n == next) {
 724           continue;
 725         }
 726         if (n->is_Region()) {
 727           for (uint j = 1; j < n->req(); j++) {
 728             wq.push(n->in(j));
 729           }
 730         } else {
 731           wq.push(n->in(0));
 732         }
 733       }
 734       for (uint i = 0; i < wq.size(); i++) {
 735         Node *n = wq.at(i);
 736         assert(n->is_CFG(), "");
 737         if (n->is_Multi()) {
 738           for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 739             Node* u = n->fast_out(j);
 740             if (u->is_CFG()) {
 741               if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 742                 return NodeSentinel;
 743               }
 744             }
 745           }
 746         }
 747       }
 748     } else  if (c->is_Proj()) {
 749       if (c->is_IfProj()) {
 750         if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
 751           // continue;
 752         } else {
 753           if (!allow_one_proj) {
 754             return NodeSentinel;
 755           }
 756           if (iffproj == nullptr) {
 757             iffproj = c;
 758           } else {
 759             return NodeSentinel;
 760           }
 761         }
 762       } else if (c->Opcode() == Op_JumpProj) {
 763         return NodeSentinel; // unsupported
 764       } else if (c->Opcode() == Op_CatchProj) {
 765         return NodeSentinel; // unsupported
 766       } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
 767         return NodeSentinel; // unsupported
 768       } else {
 769         assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
 770       }
 771     }
 772     c = next;
 773   }
 774   return iffproj;
 775 }
 776 
 777 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
 778   ResourceMark rm;
 779   VectorSet wq;
 780   wq.set(mem->_idx);
 781   mem_ctrl = phase->ctrl_or_self(mem);
 782   while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
 783     mem = next_mem(mem, alias);
 784     if (wq.test_set(mem->_idx)) {
 785       return nullptr;
 786     }
 787     mem_ctrl = phase->ctrl_or_self(mem);
 788   }
 789   if (mem->is_MergeMem()) {
 790     mem = mem->as_MergeMem()->memory_at(alias);
 791     mem_ctrl = phase->ctrl_or_self(mem);
 792   }
 793   return mem;
 794 }
 795 
 796 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
 797   Node* mem = nullptr;
 798   Node* c = ctrl;
 799   do {
 800     if (c->is_Region()) {
 801       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
 802         Node* u = c->fast_out(i);
 803         if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
 804           if (u->adr_type() == TypePtr::BOTTOM) {
 805             mem = u;
 806           }
 807         }
 808       }
 809     } else {
 810       if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
 811         CallProjections projs;
 812         c->as_Call()->extract_projections(&projs, true, false);
 813         if (projs.fallthrough_memproj != nullptr) {
 814           if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
 815             if (projs.catchall_memproj == nullptr) {
 816               mem = projs.fallthrough_memproj;
 817             } else {
 818               if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
 819                 mem = projs.fallthrough_memproj;
 820               } else {
 821                 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
 822                 mem = projs.catchall_memproj;
 823               }
 824             }
 825           }
 826         } else {
 827           Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
 828           if (proj != nullptr &&
 829               proj->adr_type() == TypePtr::BOTTOM) {
 830             mem = proj;
 831           }
 832         }
 833       } else {
 834         for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
 835           Node* u = c->fast_out(i);
 836           if (u->is_Proj() &&
 837               u->bottom_type() == Type::MEMORY &&
 838               u->adr_type() == TypePtr::BOTTOM) {
 839               assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
 840               assert(mem == nullptr, "only one proj");
 841               mem = u;
 842           }
 843         }
 844         assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
 845       }
 846     }
 847     c = phase->idom(c);
 848   } while (mem == nullptr);
 849   return mem;
 850 }
 851 
 852 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
 853   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 854     Node* u = n->fast_out(i);
 855     if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
 856       uses.push(u);
 857     }
 858   }
 859 }
 860 
 861 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
 862   OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
 863   Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
 864   phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
 865   Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
 866   phase->register_control(new_le, phase->get_loop(le), le->in(0));
 867   phase->lazy_replace(outer, new_outer);
 868   phase->lazy_replace(le, new_le);
 869   inner->clear_strip_mined();
 870 }
 871 
 872 void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
 873                                                PhaseIdealLoop* phase, int flags) {
 874   PhaseIterGVN& igvn = phase->igvn();
 875   Node* old_ctrl = ctrl;
 876 
 877   Node* thread          = new ThreadLocalNode();
 878   Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
 879   Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
 880   Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
 881                                         DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
 882                                         TypeInt::BYTE, MemNode::unordered);
 883   Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
 884   Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
 885   Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);
 886 
 887   IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 888   ctrl                  = new IfTrueNode(gc_state_iff);
 889   test_fail_ctrl        = new IfFalseNode(gc_state_iff);
 890 
 891   IdealLoopTree* loop = phase->get_loop(old_ctrl);
 892   phase->register_control(gc_state_iff,   loop, old_ctrl);
 893   phase->register_control(ctrl,           loop, gc_state_iff);
 894   phase->register_control(test_fail_ctrl, loop, gc_state_iff);
 895 
 896   phase->register_new_node(thread,        old_ctrl);
 897   phase->register_new_node(gc_state_addr, old_ctrl);
 898   phase->register_new_node(gc_state,      old_ctrl);
 899   phase->register_new_node(gc_state_and,  old_ctrl);
 900   phase->register_new_node(gc_state_cmp,  old_ctrl);
 901   phase->register_new_node(gc_state_bool, old_ctrl);
 902 
 903   phase->set_ctrl(gc_state_offset, phase->C->root());
 904 
 905   assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
 906 }
 907 
 908 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
 909   Node* old_ctrl = ctrl;
 910   PhaseIterGVN& igvn = phase->igvn();
 911 
 912   const Type* val_t = igvn.type(val);
 913   if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
 914     Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
 915     Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);
 916 
 917     IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
 918     ctrl             = new IfTrueNode(null_iff);
 919     null_ctrl        = new IfFalseNode(null_iff);
 920 
 921     IdealLoopTree* loop = phase->get_loop(old_ctrl);
 922     phase->register_control(null_iff,  loop, old_ctrl);
 923     phase->register_control(ctrl,      loop, null_iff);
 924     phase->register_control(null_ctrl, loop, null_iff);
 925 
 926     phase->register_new_node(null_cmp,  old_ctrl);
 927     phase->register_new_node(null_test, old_ctrl);
 928   }
 929 }
 930 
 931 void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
 932   Node* old_ctrl = ctrl;
 933   PhaseIterGVN& igvn = phase->igvn();
 934 
 935   Node* raw_val        = new CastP2XNode(old_ctrl, val);
 936   Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
 937 
 938   // Figure out the target cset address with raw pointer math.
 939   // This avoids matching AddP+LoadB that would emit inefficient code.
 940   // See JDK-8245465.
 941   Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
 942   Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
 943   Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
 944   Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);
 945 
 946   Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
 947                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
 948                                        TypeInt::BYTE, MemNode::unordered);
 949   Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
 950   Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);
 951 
 952   IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
 953   ctrl                 = new IfTrueNode(cset_iff);
 954   not_cset_ctrl        = new IfFalseNode(cset_iff);
 955 
 956   IdealLoopTree *loop = phase->get_loop(old_ctrl);
 957   phase->register_control(cset_iff,      loop, old_ctrl);
 958   phase->register_control(ctrl,          loop, cset_iff);
 959   phase->register_control(not_cset_ctrl, loop, cset_iff);
 960 
 961   phase->set_ctrl(cset_addr_ptr, phase->C->root());
 962 
 963   phase->register_new_node(raw_val,        old_ctrl);
 964   phase->register_new_node(cset_idx,       old_ctrl);
 965   phase->register_new_node(cset_addr,      old_ctrl);
 966   phase->register_new_node(cset_load_addr, old_ctrl);
 967   phase->register_new_node(cset_load_ptr,  old_ctrl);
 968   phase->register_new_node(cset_load,      old_ctrl);
 969   phase->register_new_node(cset_cmp,       old_ctrl);
 970   phase->register_new_node(cset_bool,      old_ctrl);
 971 }
 972 
 973 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
 974                                                DecoratorSet decorators, PhaseIdealLoop* phase) {
 975   IdealLoopTree*loop = phase->get_loop(ctrl);
 976   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
 977 
 978   address calladdr = nullptr;
 979   const char* name = nullptr;
 980   bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
 981   bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
 982   bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
 983   bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
 984   bool is_narrow  = UseCompressedOops && !is_native;
 985   if (is_strong) {
 986     if (is_narrow) {
 987       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
 988       name = "load_reference_barrier_strong_narrow";
 989     } else {
 990       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
 991       name = "load_reference_barrier_strong";
 992     }
 993   } else if (is_weak) {
 994     if (is_narrow) {
 995       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
 996       name = "load_reference_barrier_weak_narrow";
 997     } else {
 998       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
 999       name = "load_reference_barrier_weak";
1000     }
1001   } else {
1002     assert(is_phantom, "only remaining strength");
1003     if (is_narrow) {
1004       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1005       name = "load_reference_barrier_phantom_narrow";
1006     } else {
1007       calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
1008       name = "load_reference_barrier_phantom";
1009     }
1010   }
1011   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1012 
1013   call->init_req(TypeFunc::Control, ctrl);
1014   call->init_req(TypeFunc::I_O, phase->C->top());
1015   call->init_req(TypeFunc::Memory, phase->C->top());
1016   call->init_req(TypeFunc::FramePtr, phase->C->top());
1017   call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1018   call->init_req(TypeFunc::Parms, val);
1019   call->init_req(TypeFunc::Parms+1, load_addr);
1020   phase->register_control(call, loop, ctrl);
1021   ctrl = new ProjNode(call, TypeFunc::Control);
1022   phase->register_control(ctrl, loop, call);
1023   val = new ProjNode(call, TypeFunc::Parms);
1024   phase->register_new_node(val, call);
1025   val = new CheckCastPPNode(ctrl, val, obj_type);
1026   phase->register_new_node(val, ctrl);
1027 }
1028 
1029 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
1030   Node* ctrl = phase->get_ctrl(barrier);
1031   Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
1032 
1033   // Update the control of all nodes that should be after the
1034   // barrier control flow
1035   uses.clear();
1036   // Every node that is control dependent on the barrier's input
1037   // control will be after the expanded barrier. The raw memory (if
1038   // its memory is control dependent on the barrier's input control)
1039   // must stay above the barrier.
1040   uses_to_ignore.clear();
1041   if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1042     uses_to_ignore.push(init_raw_mem);
1043   }
1044   for (uint next = 0; next < uses_to_ignore.size(); next++) {
1045     Node *n = uses_to_ignore.at(next);
1046     for (uint i = 0; i < n->req(); i++) {
1047       Node* in = n->in(i);
1048       if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
1049         uses_to_ignore.push(in);
1050       }
1051     }
1052   }
1053   for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
1054     Node* u = ctrl->fast_out(i);
1055     if (u->_idx < last &&
1056         u != barrier &&
1057         !uses_to_ignore.member(u) &&
1058         (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
1059         (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
1060       Node* old_c = phase->ctrl_or_self(u);
1061       Node* c = old_c;
1062       if (c != ctrl ||
1063           is_dominator_same_ctrl(old_c, barrier, u, phase) ||
1064           ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
1065         phase->igvn().rehash_node_delayed(u);
1066         int nb = u->replace_edge(ctrl, region, &phase->igvn());
1067         if (u->is_CFG()) {
1068           if (phase->idom(u) == ctrl) {
1069             phase->set_idom(u, region, phase->dom_depth(region));
1070           }
1071         } else if (phase->get_ctrl(u) == ctrl) {
1072           assert(u != init_raw_mem, "should leave input raw mem above the barrier");
1073           uses.push(u);
1074         }
1075         assert(nb == 1, "more than 1 ctrl input?");
1076         --i, imax -= nb;
1077       }
1078     }
1079   }
1080 }
1081 
1082 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1083   Node* region = nullptr;
1084   while (c != ctrl) {
1085     if (c->is_Region()) {
1086       region = c;
1087     }
1088     c = phase->idom(c);
1089   }
1090   assert(region != nullptr, "");
1091   Node* phi = new PhiNode(region, n->bottom_type());
1092   for (uint j = 1; j < region->req(); j++) {
1093     Node* in = region->in(j);
1094     if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1095       phi->init_req(j, n);
1096     } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1097       phi->init_req(j, n_clone);
1098     } else {
1099       phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1100     }
1101   }
1102   phase->register_new_node(phi, region);
1103   return phi;
1104 }
1105 
1106 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1107   ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1108 
1109   Unique_Node_List uses;
1110   for (int i = 0; i < state->iu_barriers_count(); i++) {
1111     Node* barrier = state->iu_barrier(i);
1112     Node* ctrl = phase->get_ctrl(barrier);
1113     IdealLoopTree* loop = phase->get_loop(ctrl);
1114     Node* head = loop->head();
1115     if (head->is_OuterStripMinedLoop()) {
1116       // Expanding a barrier here will break loop strip mining
1117       // verification. Transform the loop so the loop nest doesn't
1118       // appear as strip mined.
1119       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1120       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1121     }
1122   }
1123 
1124   Node_Stack stack(0);
1125   Node_List clones;
1126   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1127     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1128 
1129     Node* ctrl = phase->get_ctrl(lrb);
1130     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1131 
1132     CallStaticJavaNode* unc = nullptr;
1133     Node* unc_ctrl = nullptr;
1134     Node* uncasted_val = val;
1135 
1136     for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1137       Node* u = lrb->fast_out(i);
1138       if (u->Opcode() == Op_CastPP &&
1139           u->in(0) != nullptr &&
1140           phase->is_dominator(u->in(0), ctrl)) {
1141         const Type* u_t = phase->igvn().type(u);
1142 
1143         if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1144             u->in(0)->Opcode() == Op_IfTrue &&
1145             u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1146             u->in(0)->in(0)->is_If() &&
1147             u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1148             u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1149             u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1150             u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1151             u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1152           IdealLoopTree* loop = phase->get_loop(ctrl);
1153           IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1154 
1155           if (!unc_loop->is_member(loop)) {
1156             continue;
1157           }
1158 
1159           Node* branch = no_branches(ctrl, u->in(0), false, phase);
1160           assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1161           if (branch == NodeSentinel) {
1162             continue;
1163           }
1164 
1165           Node* iff = u->in(0)->in(0);
1166           Node* bol = iff->in(1)->clone();
1167           Node* cmp = bol->in(1)->clone();
1168           cmp->set_req(1, lrb);
1169           bol->set_req(1, cmp);
1170           phase->igvn().replace_input_of(iff, 1, bol);
1171           phase->set_ctrl(lrb, iff->in(0));
1172           phase->register_new_node(cmp, iff->in(0));
1173           phase->register_new_node(bol, iff->in(0));
1174           break;
1175         }
1176       }
1177     }
1178     if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1179       CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1180       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1181         // The rethrow call may have too many projections to be
1182         // properly handled here. Given there's no reason for a
1183         // barrier to depend on the call, move it above the call
1184         stack.push(lrb, 0);
1185         do {
1186           Node* n = stack.node();
1187           uint idx = stack.index();
1188           if (idx < n->req()) {
1189             Node* in = n->in(idx);
1190             stack.set_index(idx+1);
1191             if (in != nullptr) {
1192               if (phase->has_ctrl(in)) {
1193                 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1194 #ifdef ASSERT
1195                   for (uint i = 0; i < stack.size(); i++) {
1196                     assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1197                   }
1198 #endif
1199                   stack.push(in, 0);
1200                 }
1201               } else {
1202                 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1203               }
1204             }
1205           } else {
1206             phase->set_ctrl(n, call->in(0));
1207             stack.pop();
1208           }
1209         } while(stack.size() > 0);
1210         continue;
1211       }
1212       CallProjections projs;
1213       call->extract_projections(&projs, false, false);
1214 
1215 #ifdef ASSERT
1216       VectorSet cloned;
1217 #endif
1218       Node* lrb_clone = lrb->clone();
1219       phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1220       phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1221 
1222       stack.push(lrb, 0);
1223       clones.push(lrb_clone);
1224 
1225       do {
1226         assert(stack.size() == clones.size(), "");
1227         Node* n = stack.node();
1228 #ifdef ASSERT
1229         if (n->is_Load()) {
1230           Node* mem = n->in(MemNode::Memory);
1231           for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1232             Node* u = mem->fast_out(j);
1233             assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1234           }
1235         }
1236 #endif
1237         uint idx = stack.index();
1238         Node* n_clone = clones.at(clones.size()-1);
1239         if (idx < n->outcnt()) {
1240           Node* u = n->raw_out(idx);
1241           Node* c = phase->ctrl_or_self(u);
1242           if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1243             stack.set_index(idx+1);
1244             assert(!u->is_CFG(), "");
1245             stack.push(u, 0);
1246             assert(!cloned.test_set(u->_idx), "only one clone");
1247             Node* u_clone = u->clone();
1248             int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1249             assert(nb > 0, "should have replaced some uses");
1250             phase->register_new_node(u_clone, projs.catchall_catchproj);
1251             clones.push(u_clone);
1252             phase->set_ctrl(u, projs.fallthrough_catchproj);
1253           } else {
1254             bool replaced = false;
1255             if (u->is_Phi()) {
1256               for (uint k = 1; k < u->req(); k++) {
1257                 if (u->in(k) == n) {
1258                   if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1259                     phase->igvn().replace_input_of(u, k, n_clone);
1260                     replaced = true;
1261                   } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1262                     phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1263                     replaced = true;
1264                   }
1265                 }
1266               }
1267             } else {
1268               if (phase->is_dominator(projs.catchall_catchproj, c)) {
1269                 phase->igvn().rehash_node_delayed(u);
1270                 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1271                 assert(nb > 0, "should have replaced some uses");
1272                 replaced = true;
1273               } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1274                 if (u->is_If()) {
1275                   // Can't break If/Bool/Cmp chain
1276                   assert(n->is_Bool(), "unexpected If shape");
1277                   assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1278                   assert(n_clone->is_Bool(), "unexpected clone");
1279                   assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1280                   Node* bol_clone = n->clone();
1281                   Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1282                   bol_clone->set_req(1, cmp_clone);
1283 
1284                   Node* nn = stack.node_at(stack.size()-3);
1285                   Node* nn_clone = clones.at(clones.size()-3);
1286                   assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1287 
1288                   int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1289                                                    &phase->igvn());
1290                   assert(nb > 0, "should have replaced some uses");
1291 
1292                   phase->register_new_node(bol_clone, u->in(0));
1293                   phase->register_new_node(cmp_clone, u->in(0));
1294 
1295                   phase->igvn().replace_input_of(u, 1, bol_clone);
1296 
1297                 } else {
1298                   phase->igvn().rehash_node_delayed(u);
1299                   int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1300                   assert(nb > 0, "should have replaced some uses");
1301                 }
1302                 replaced = true;
1303               }
1304             }
1305             if (!replaced) {
1306               stack.set_index(idx+1);
1307             }
1308           }
1309         } else {
1310           stack.pop();
1311           clones.pop();
1312         }
1313       } while (stack.size() > 0);
1314       assert(stack.size() == 0 && clones.size() == 0, "");
1315     }
1316   }
1317 
1318   for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1319     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1320     Node* ctrl = phase->get_ctrl(lrb);
1321     IdealLoopTree* loop = phase->get_loop(ctrl);
1322     Node* head = loop->head();
1323     if (head->is_OuterStripMinedLoop()) {
1324       // Expanding a barrier here will break loop strip mining
1325       // verification. Transform the loop so the loop nest doesn't
1326       // appear as strip mined.
1327       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1328       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1329     }
1330   }
1331 
1332   // Expand load-reference-barriers
1333   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1334   Unique_Node_List uses_to_ignore;
1335   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1336     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1337     uint last = phase->C->unique();
1338     Node* ctrl = phase->get_ctrl(lrb);
1339     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1340 
1341     Node* orig_ctrl = ctrl;
1342 
1343     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1344     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1345 
1346     IdealLoopTree *loop = phase->get_loop(ctrl);
1347 
1348     Node* heap_stable_ctrl = nullptr;
1349     Node* null_ctrl = nullptr;
1350 
1351     assert(val->bottom_type()->make_oopptr(), "need oop");
1352     assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1353 
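         // The expansion builds a multi-way diamond; roughly, as a pseudocode sketch only (WEAK_ROOTS is
         // tested only for non-strong accesses, the in-cset test only for strong ones):
         //
         //   if ((gc_state & (HAS_FORWARDED | WEAK_ROOTS)) == 0)  result = val;                  // _heap_stable
         //   else if (!in_collection_set(val))                    result = val;                  // _not_cset
         //   else                                                 result = lrb_stub(val, addr);  // _evac_path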
1354     enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1355     Node* region = new RegionNode(PATH_LIMIT);
1356     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1357 
1358     // Stable path.
1359     int flags = ShenandoahHeap::HAS_FORWARDED;
1360     if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1361       flags |= ShenandoahHeap::WEAK_ROOTS;
1362     }
1363     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1364     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1365 
1366     // Heap stable case
1367     region->init_req(_heap_stable, heap_stable_ctrl);
1368     val_phi->init_req(_heap_stable, val);
1369 
1370     // Test for in-cset, unless it's a non-strong (weak/native) LRB. Such LRBs need to return null
1371     // even for non-cset objects to prevent resurrection of such objects.
1372     // Wires !in_cset(obj) to the _not_cset slot of the region and value phi.
1373     Node* not_cset_ctrl = nullptr;
1374     if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1375       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1376     }
1377     if (not_cset_ctrl != nullptr) {
1378       region->init_req(_not_cset, not_cset_ctrl);
1379       val_phi->init_req(_not_cset, val);
1380     } else {
1381       region->del_req(_not_cset);
1382       val_phi->del_req(_not_cset);
1383     }
1384 
1385     // Resolve the object when the original value is in the collection set:
1386     // this path performs the unconditional forwarding-pointer resolution.
1387 
1388     // Call the LRB stub and wire that path into the _evac_path slot.
1389     Node* result_mem = nullptr;
1390 
1391     Node* addr;
1392     {
1393       VectorSet visited;
1394       addr = get_load_addr(phase, visited, lrb);
1395     }
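         // If the address is an AddP (chain) off the loaded object's base, rebuild it on a CheckCastPP of the
         // base that is pinned at ctrl, so the address computation handed to the stub cannot float above this path.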
1396     if (addr->Opcode() == Op_AddP) {
1397       Node* orig_base = addr->in(AddPNode::Base);
1398       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1399       phase->register_new_node(base, ctrl);
1400       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1401         // Field access
1402         addr = addr->clone();
1403         addr->set_req(AddPNode::Base, base);
1404         addr->set_req(AddPNode::Address, base);
1405         phase->register_new_node(addr, ctrl);
1406       } else {
1407         Node* addr2 = addr->in(AddPNode::Address);
1408         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1409               addr2->in(AddPNode::Base) == orig_base) {
1410           addr2 = addr2->clone();
1411           addr2->set_req(AddPNode::Base, base);
1412           addr2->set_req(AddPNode::Address, base);
1413           phase->register_new_node(addr2, ctrl);
1414           addr = addr->clone();
1415           addr->set_req(AddPNode::Base, base);
1416           addr->set_req(AddPNode::Address, addr2);
1417           phase->register_new_node(addr, ctrl);
1418         }
1419       }
1420     }
1421     call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1422     region->init_req(_evac_path, ctrl);
1423     val_phi->init_req(_evac_path, val);
1424 
1425     phase->register_control(region, loop, heap_stable_iff);
1426     Node* out_val = val_phi;
1427     phase->register_new_node(val_phi, region);
1428 
1429     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1430 
1431     ctrl = orig_ctrl;
1432 
1433     phase->igvn().replace_node(lrb, out_val);
1434 
1435     follow_barrier_uses(out_val, ctrl, uses, phase);
1436 
1437     for(uint next = 0; next < uses.size(); next++ ) {
1438       Node *n = uses.at(next);
1439       assert(phase->get_ctrl(n) == ctrl, "bad control");
1440       assert(n != raw_mem, "should leave input raw mem above the barrier");
1441       phase->set_ctrl(n, region);
1442       follow_barrier_uses(n, ctrl, uses, phase);
1443     }
1444     fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1445   }
1446   // Done expanding load-reference-barriers.
1447   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1448 
1449   for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
1450     Node* barrier = state->iu_barrier(i);
1451     Node* pre_val = barrier->in(1);
1452 
1453     if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1454       ShouldNotReachHere();
1455       continue;
1456     }
1457 
1458     Node* ctrl = phase->get_ctrl(barrier);
1459 
1460     if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1461       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1462       ctrl = ctrl->in(0)->in(0);
1463       phase->set_ctrl(barrier, ctrl);
1464     } else if (ctrl->is_CallRuntime()) {
1465       assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1466       ctrl = ctrl->in(0);
1467       phase->set_ctrl(barrier, ctrl);
1468     }
1469 
1470     Node* init_ctrl = ctrl;
1471     IdealLoopTree* loop = phase->get_loop(ctrl);
1472     Node* raw_mem = fixer.find_mem(ctrl, barrier);
1473     Node* init_raw_mem = raw_mem;
1474     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1475     Node* heap_stable_ctrl = nullptr;
1476     Node* null_ctrl = nullptr;
1477     uint last = phase->C->unique();
1478 
1479     enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1480     Node* region = new RegionNode(PATH_LIMIT);
1481     Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1482 
1483     enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1484     Node* region2 = new RegionNode(PATH_LIMIT2);
1485     Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1486 
1487     // Stable path.
1488     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1489     region->init_req(_heap_stable, heap_stable_ctrl);
1490     phi->init_req(_heap_stable, raw_mem);
1491 
1492     // Null path
1493     Node* reg2_ctrl = nullptr;
1494     test_null(ctrl, pre_val, null_ctrl, phase);
1495     if (null_ctrl != nullptr) {
1496       reg2_ctrl = null_ctrl->in(0);
1497       region2->init_req(_null_path, null_ctrl);
1498       phi2->init_req(_null_path, raw_mem);
1499     } else {
1500       region2->del_req(_null_path);
1501       phi2->del_req(_null_path);
1502     }
1503 
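         // Emit the SATB pre-barrier fast path; roughly, as a pseudocode sketch of the IR built below:
         //
         //   if (gc_state & MARKING) {                    // tested above (heap-stable test)
         //     if (pre_val != nullptr) {                  // tested above (null path)
         //       index = thread->satb_queue_index;
         //       if (index != 0) {
         //         index -= sizeof(intptr_t);
         //         buffer = thread->satb_queue_buffer;
         //         *(buffer + index) = pre_val;           // record pre_val in the thread-local SATB buffer
         //         thread->satb_queue_index = index;
         //       } else {
         //         write_ref_field_pre_entry(pre_val, thread);  // runtime slow path
         //       }
         //     }
         //   }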
1504     const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1505     const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1506     Node* thread = new ThreadLocalNode();
1507     phase->register_new_node(thread, ctrl);
1508     Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1509     phase->register_new_node(buffer_adr, ctrl);
1510     Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1511     phase->register_new_node(index_adr, ctrl);
1512 
1513     BasicType index_bt = TypeX_X->basic_type();
1514     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1515     const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1516     Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1517     phase->register_new_node(index, ctrl);
1518     Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1519     phase->register_new_node(index_cmp, ctrl);
1520     Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1521     phase->register_new_node(index_test, ctrl);
1522     IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1523     if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
1524     phase->register_control(queue_full_iff, loop, ctrl);
1525     Node* not_full = new IfTrueNode(queue_full_iff);
1526     phase->register_control(not_full, loop, queue_full_iff);
1527     Node* full = new IfFalseNode(queue_full_iff);
1528     phase->register_control(full, loop, queue_full_iff);
1529 
1530     ctrl = not_full;
1531 
1532     Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1533     phase->register_new_node(next_index, ctrl);
1534 
1535     Node* buffer  = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1536     phase->register_new_node(buffer, ctrl);
1537     Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1538     phase->register_new_node(log_addr, ctrl);
1539     Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1540     phase->register_new_node(log_store, ctrl);
1541     // Update the SATB queue index.
1542     Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1543     phase->register_new_node(index_update, ctrl);
1544 
1545     // Fast-path case
1546     region2->init_req(_fast_path, ctrl);
1547     phi2->init_req(_fast_path, index_update);
1548 
1549     ctrl = full;
1550 
1551     Node* base = find_bottom_mem(ctrl, phase);
1552 
1553     MergeMemNode* mm = MergeMemNode::make(base);
1554     mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1555     phase->register_new_node(mm, ctrl);
1556 
1557     Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1558     call->init_req(TypeFunc::Control, ctrl);
1559     call->init_req(TypeFunc::I_O, phase->C->top());
1560     call->init_req(TypeFunc::Memory, mm);
1561     call->init_req(TypeFunc::FramePtr, phase->C->top());
1562     call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1563     call->init_req(TypeFunc::Parms, pre_val);
1564     call->init_req(TypeFunc::Parms+1, thread);
1565     phase->register_control(call, loop, ctrl);
1566 
1567     Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1568     phase->register_control(ctrl_proj, loop, call);
1569     Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1570     phase->register_new_node(mem_proj, call);
1571 
1572     // Slow-path case
1573     region2->init_req(_slow_path, ctrl_proj);
1574     phi2->init_req(_slow_path, mem_proj);
1575 
1576     phase->register_control(region2, loop, reg2_ctrl);
1577     phase->register_new_node(phi2, region2);
1578 
1579     region->init_req(_heap_unstable, region2);
1580     phi->init_req(_heap_unstable, phi2);
1581 
1582     phase->register_control(region, loop, heap_stable_ctrl->in(0));
1583     phase->register_new_node(phi, region);
1584 
1585     fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1586     for(uint next = 0; next < uses.size(); next++ ) {
1587       Node *n = uses.at(next);
1588       assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1589       assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1590       phase->set_ctrl(n, region);
1591       follow_barrier_uses(n, init_ctrl, uses, phase);
1592     }
1593     fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1594 
1595     phase->igvn().replace_node(barrier, pre_val);
1596   }
1597   assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1598 
1599 }
1600 
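     // Walk back from an LRB's value input to the address the value was loaded from. Returns nullptr when a
     // node on this path was already visited, and a zero constant (meaning "address unknown") when the address
     // cannot be recovered (constants, call results, atomics, ambiguous phis/CMoves, ...).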
1601 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1602   if (visited.test_set(in->_idx)) {
1603     return nullptr;
1604   }
1605   switch (in->Opcode()) {
1606     case Op_Proj:
1607       return get_load_addr(phase, visited, in->in(0));
1608     case Op_CastPP:
1609     case Op_CheckCastPP:
1610     case Op_DecodeN:
1611     case Op_EncodeP:
1612       return get_load_addr(phase, visited, in->in(1));
1613     case Op_LoadN:
1614     case Op_LoadP:
1615       return in->in(MemNode::Address);
1616     case Op_CompareAndExchangeN:
1617     case Op_CompareAndExchangeP:
1618     case Op_GetAndSetN:
1619     case Op_GetAndSetP:
1620     case Op_ShenandoahCompareAndExchangeP:
1621     case Op_ShenandoahCompareAndExchangeN:
1622       // These instructions would just have stored a different
1623       // value into the field; there is no point trying to recover the load address here.
1624       return phase->igvn().zerocon(T_OBJECT);
1625     case Op_CMoveP:
1626     case Op_CMoveN: {
1627       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1628       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1629       // Handle unambiguous cases: single address reported on both branches.
1630       if (t != nullptr && f == nullptr) return t;
1631       if (t == nullptr && f != nullptr) return f;
1632       if (t != nullptr && t == f)    return t;
1633       // Ambiguity.
1634       return phase->igvn().zerocon(T_OBJECT);
1635     }
1636     case Op_Phi: {
1637       Node* addr = nullptr;
1638       for (uint i = 1; i < in->req(); i++) {
1639         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1640         if (addr == nullptr) {
1641           addr = addr1;
1642         }
1643         if (addr != addr1) {
1644           return phase->igvn().zerocon(T_OBJECT);
1645         }
1646       }
1647       return addr;
1648     }
1649     case Op_ShenandoahLoadReferenceBarrier:
1650       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1651     case Op_ShenandoahIUBarrier:
1652       return get_load_addr(phase, visited, in->in(1));
1653     case Op_CallDynamicJava:
1654     case Op_CallLeaf:
1655     case Op_CallStaticJava:
1656     case Op_ConN:
1657     case Op_ConP:
1658     case Op_Parm:
1659     case Op_CreateEx:
1660       return phase->igvn().zerocon(T_OBJECT);
1661     default:
1662 #ifdef ASSERT
1663       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1664 #endif
1665       return phase->igvn().zerocon(T_OBJECT);
1666   }
1667 
1668 }
1669 
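     // The gc-state test is loop invariant: clone the load/AndI/CmpI/Bool chain at the loop entry (unless the
     // gc-state load already dominates the entry), so the If tests a value computed before the loop and the
     // test becomes a candidate for loop unswitching.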
1670 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1671   IdealLoopTree *loop = phase->get_loop(iff);
1672   Node* loop_head = loop->_head;
1673   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1674 
1675   Node* bol = iff->in(1);
1676   Node* cmp = bol->in(1);
1677   Node* andi = cmp->in(1);
1678   Node* load = andi->in(1);
1679 
1680   assert(is_gc_state_load(load), "broken");
1681   if (!phase->is_dominator(load->in(0), entry_c)) {
1682     Node* mem_ctrl = nullptr;
1683     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1684     load = load->clone();
1685     load->set_req(MemNode::Memory, mem);
1686     load->set_req(0, entry_c);
1687     phase->register_new_node(load, entry_c);
1688     andi = andi->clone();
1689     andi->set_req(1, load);
1690     phase->register_new_node(andi, entry_c);
1691     cmp = cmp->clone();
1692     cmp->set_req(1, andi);
1693     phase->register_new_node(cmp, entry_c);
1694     bol = bol->clone();
1695     bol->set_req(1, cmp);
1696     phase->register_new_node(bol, entry_c);
1697 
1698     phase->igvn().replace_input_of(iff, 1, bol);
1699   }
1700 }
1701 
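     // Returns true when n is a heap-stable (gc-state) test whose region is immediately dominated by another
     // heap-stable test, with every region input reached through one of the dominating test's projections:
     // such back-to-back tests can then be merged by merge_back_to_back_tests().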
1702 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1703   if (!n->is_If() || n->is_CountedLoopEnd()) {
1704     return false;
1705   }
1706   Node* region = n->in(0);
1707 
1708   if (!region->is_Region()) {
1709     return false;
1710   }
1711   Node* dom = phase->idom(region);
1712   if (!dom->is_If()) {
1713     return false;
1714   }
1715 
1716   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1717     return false;
1718   }
1719 
1720   IfNode* dom_if = dom->as_If();
1721   Node* proj_true = dom_if->proj_out(1);
1722   Node* proj_false = dom_if->proj_out(0);
1723 
1724   for (uint i = 1; i < region->req(); i++) {
1725     if (phase->is_dominator(proj_true, region->in(i))) {
1726       continue;
1727     }
1728     if (phase->is_dominator(proj_false, region->in(i))) {
1729       continue;
1730     }
1731     return false;
1732   }
1733 
1734   return true;
1735 }
1736 
1737 bool ShenandoahBarrierC2Support::merge_point_safe(Node* region) {
1738   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1739     Node* n = region->fast_out(i);
1740     if (n->is_LoadStore()) {
1741       // Splitting a LoadStore node through a phi causes it to lose its SCMemProj: the split-if code doesn't support
1742       // a LoadStore at the region the if is split through, because that's not expected to happen (LoadStore nodes
1743       // should be between barrier nodes). It does happen with Shenandoah, however, because barriers can get
1744       // expanded around a LoadStore node.
1745       return false;
1746     }
1747   }
1748   return true;
1749 }
1750 
1751 
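     // Merge a heap-stable test with an identical dominating test: the dominated If's Bool input is replaced
     // with a phi of constants (1 on region inputs reached through the dominating test's true projection,
     // 0 otherwise), after which split-if folds the redundant test away.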
1752 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1753   assert(is_heap_stable_test(n), "no other tests");
1754   if (identical_backtoback_ifs(n, phase)) {
1755     Node* n_ctrl = n->in(0);
1756     if (phase->can_split_if(n_ctrl) && merge_point_safe(n_ctrl)) {
1757       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1758       if (is_heap_stable_test(n)) {
1759         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1760         assert(is_gc_state_load(gc_state_load), "broken");
1761         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1762         assert(is_gc_state_load(dom_gc_state_load), "broken");
1763         if (gc_state_load != dom_gc_state_load) {
1764           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1765         }
1766       }
1767       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1768       Node* proj_true = dom_if->proj_out(1);
1769       Node* proj_false = dom_if->proj_out(0);
1770       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1771       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1772 
1773       for (uint i = 1; i < n_ctrl->req(); i++) {
1774         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1775           bolphi->init_req(i, con_true);
1776         } else {
1777           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1778           bolphi->init_req(i, con_false);
1779         }
1780       }
1781       phase->register_new_node(bolphi, n_ctrl);
1782       phase->igvn().replace_input_of(n, 1, bolphi);
1783       phase->do_split_if(n);
1784     }
1785   }
1786 }
1787 
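     // Note that a heap-stable test is only taken as an unswitching candidate if the loop contains no
     // (non-leaf-call) safepoints, since the gc state may change at a safepoint and the test would then
     // not stay invariant across the whole loop.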
1788 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1789   // Find first invariant test that doesn't exit the loop
1790   LoopNode *head = loop->_head->as_Loop();
1791   IfNode* unswitch_iff = nullptr;
1792   Node* n = head->in(LoopNode::LoopBackControl);
1793   int loop_has_sfpts = -1;
1794   while (n != head) {
1795     Node* n_dom = phase->idom(n);
1796     if (n->is_Region()) {
1797       if (n_dom->is_If()) {
1798         IfNode* iff = n_dom->as_If();
1799         if (iff->in(1)->is_Bool()) {
1800           BoolNode* bol = iff->in(1)->as_Bool();
1801           if (bol->in(1)->is_Cmp()) {
1802             // If the condition is invariant and not a loop exit,
1803             // we have found a reason to unswitch.
1804             if (is_heap_stable_test(iff) &&
1805                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1806               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1807               if (loop_has_sfpts == -1) {
1808                 for(uint i = 0; i < loop->_body.size(); i++) {
1809                   Node *m = loop->_body[i];
1810                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1811                     loop_has_sfpts = 1;
1812                     break;
1813                   }
1814                 }
1815                 if (loop_has_sfpts == -1) {
1816                   loop_has_sfpts = 0;
1817                 }
1818               }
1819               if (!loop_has_sfpts) {
1820                 unswitch_iff = iff;
1821               }
1822             }
1823           }
1824         }
1825       }
1826     }
1827     n = n_dom;
1828   }
1829   return unswitch_iff;
1830 }
1831 
1832 
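     // After barrier expansion: collect all heap-stable (gc-state) tests with a DFS over the graph, merge
     // back-to-back duplicates, and, if that made no major progress, try to unswitch innermost loops on the
     // remaining tests so that the heap-stable path runs free of mid-loop gc-state checks.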
1833 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1834   Node_List heap_stable_tests;
1835   stack.push(phase->C->start(), 0);
1836   do {
1837     Node* n = stack.node();
1838     uint i = stack.index();
1839 
1840     if (i < n->outcnt()) {
1841       Node* u = n->raw_out(i);
1842       stack.set_index(i+1);
1843       if (!visited.test_set(u->_idx)) {
1844         stack.push(u, 0);
1845       }
1846     } else {
1847       stack.pop();
1848       if (n->is_If() && is_heap_stable_test(n)) {
1849         heap_stable_tests.push(n);
1850       }
1851     }
1852   } while (stack.size() > 0);
1853 
1854   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1855     Node* n = heap_stable_tests.at(i);
1856     assert(is_heap_stable_test(n), "only evacuation test");
1857     merge_back_to_back_tests(n, phase);
1858   }
1859 
1860   if (!phase->C->major_progress()) {
1861     VectorSet seen;
1862     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1863       Node* n = heap_stable_tests.at(i);
1864       IdealLoopTree* loop = phase->get_loop(n);
1865       if (loop != phase->ltree_root() &&
1866           loop->_child == nullptr &&
1867           !loop->_irreducible) {
1868         Node* head = loop->_head;
1869         if (head->is_Loop() &&
1870             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1871             !seen.test_set(head->_idx)) {
1872           IfNode* iff = find_unswitching_candidate(loop, phase);
1873           if (iff != nullptr) {
1874             Node* bol = iff->in(1);
1875             if (head->as_Loop()->is_strip_mined()) {
1876               head->as_Loop()->verify_strip_mined(0);
1877             }
1878             move_gc_state_test_out_of_loop(iff, phase);
1879 
1880             AutoNodeBudget node_budget(phase);
1881 
1882             if (loop->policy_unswitching(phase)) {
1883               if (head->as_Loop()->is_strip_mined()) {
1884                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1885                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1886               }
1887               phase->do_unswitching(loop, old_new);
1888             } else {
1889               // Not proceeding with unswitching. Move the load back
1890               // into the loop.
1891               phase->igvn().replace_input_of(iff, 1, bol);
1892             }
1893           }
1894         }
1895       }
1896     }
1897   }
1898 }
1899 
1900 ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) {
1901   ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
1902 }
1903 
1904 const Type* ShenandoahIUBarrierNode::bottom_type() const {
1905   if (in(1) == nullptr || in(1)->is_top()) {
1906     return Type::TOP;
1907   }
1908   const Type* t = in(1)->bottom_type();
1909   if (t == TypePtr::NULL_PTR) {
1910     return t;
1911   }
1912   return t->is_oopptr();
1913 }
1914 
1915 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
1916   if (in(1) == nullptr) {
1917     return Type::TOP;
1918   }
1919   const Type* t = phase->type(in(1));
1920   if (t == Type::TOP) {
1921     return Type::TOP;
1922   }
1923   if (t == TypePtr::NULL_PTR) {
1924     return t;
1925   }
1926   return t->is_oopptr();
1927 }
1928 
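     // Classify whether a value flowing into an IU barrier still needs it: null, constant oops, fresh
     // allocations and values already guarded by an IU barrier do not (NotNeeded); Phi and CMove inputs have
     // to be examined recursively (MaybeNeeded); everything else keeps the barrier (Needed).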
1929 int ShenandoahIUBarrierNode::needed(Node* n) {
1930   if (n == nullptr ||
1931       n->is_Allocate() ||
1932       n->Opcode() == Op_ShenandoahIUBarrier ||
1933       n->bottom_type() == TypePtr::NULL_PTR ||
1934       (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) {
1935     return NotNeeded;
1936   }
1937   if (n->is_Phi() ||
1938       n->is_CMove()) {
1939     return MaybeNeeded;
1940   }
1941   return Needed;
1942 }
1943 
1944 Node* ShenandoahIUBarrierNode::next(Node* n) {
1945   for (;;) {
1946     if (n == nullptr) {
1947       return n;
1948     } else if (n->bottom_type() == TypePtr::NULL_PTR) {
1949       return n;
1950     } else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
1951       return n;
1952     } else if (n->is_ConstraintCast() ||
1953                n->Opcode() == Op_DecodeN ||
1954                n->Opcode() == Op_EncodeP) {
1955       n = n->in(1);
1956     } else if (n->is_Proj()) {
1957       n = n->in(0);
1958     } else {
1959       return n;
1960     }
1961   }
1962   ShouldNotReachHere();
1963   return nullptr;
1964 }
1965 
1966 Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
1967   PhaseIterGVN* igvn = phase->is_IterGVN();
1968 
1969   Node* n = next(in(1));
1970 
1971   int cont = needed(n);
1972 
1973   if (cont == NotNeeded) {
1974     return in(1);
1975   } else if (cont == MaybeNeeded) {
1976     if (igvn == nullptr) {
1977       phase->record_for_igvn(this);
1978       return this;
1979     } else {
1980       ResourceMark rm;
1981       Unique_Node_List wq;
1982       uint wq_i = 0;
1983 
1984       for (;;) {
1985         if (n->is_Phi()) {
1986           for (uint i = 1; i < n->req(); i++) {
1987             Node* m = n->in(i);
1988             if (m != nullptr) {
1989               wq.push(m);
1990             }
1991           }
1992         } else {
1993           assert(n->is_CMove(), "nothing else here");
1994           Node* m = n->in(CMoveNode::IfFalse);
1995           wq.push(m);
1996           m = n->in(CMoveNode::IfTrue);
1997           wq.push(m);
1998         }
1999         Node* orig_n = nullptr;
2000         do {
2001           if (wq_i >= wq.size()) {
2002             return in(1);
2003           }
2004           n = wq.at(wq_i);
2005           wq_i++;
2006           orig_n = n;
2007           n = next(n);
2008           cont = needed(n);
2009           if (cont == Needed) {
2010             return this;
2011           }
2012         } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
2013       }
2014     }
2015   }
2016 
2017   return this;
2018 }
2019 
2020 #ifdef ASSERT
2021 static bool has_never_branch(Node* root) {
2022   for (uint i = 1; i < root->req(); i++) {
2023     Node* in = root->in(i);
2024     if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
2025       return true;
2026     }
2027   }
2028   return false;
2029 }
2030 #endif
2031 
2032 void MemoryGraphFixer::collect_memory_nodes() {
2033   Node_Stack stack(0);
2034   VectorSet visited;
2035   Node_List regions;
2036 
2037   // Walk the raw memory graph and create a mapping from CFG node to
2038   // memory node. Exclude phis for now.
2039   stack.push(_phase->C->root(), 1);
2040   do {
2041     Node* n = stack.node();
2042     int opc = n->Opcode();
2043     uint i = stack.index();
2044     if (i < n->req()) {
2045       Node* mem = nullptr;
2046       if (opc == Op_Root) {
2047         Node* in = n->in(i);
2048         int in_opc = in->Opcode();
2049         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
2050           mem = in->in(TypeFunc::Memory);
2051         } else if (in_opc == Op_Halt) {
2052           if (in->in(0)->is_Region()) {
2053             Node* r = in->in(0);
2054             for (uint j = 1; j < r->req(); j++) {
2055               assert(!r->in(j)->is_NeverBranch(), "");
2056             }
2057           } else {
2058             Node* proj = in->in(0);
2059             assert(proj->is_Proj(), "");
2060             Node* in = proj->in(0);
2061             assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
2062             if (in->is_CallStaticJava()) {
2063               mem = in->in(TypeFunc::Memory);
2064             } else if (in->Opcode() == Op_Catch) {
2065               Node* call = in->in(0)->in(0);
2066               assert(call->is_Call(), "");
2067               mem = call->in(TypeFunc::Memory);
2068             } else if (in->is_NeverBranch()) {
2069               mem = collect_memory_for_infinite_loop(in);
2070             }
2071           }
2072         } else {
2073 #ifdef ASSERT
2074           n->dump();
2075           in->dump();
2076 #endif
2077           ShouldNotReachHere();
2078         }
2079       } else {
2080         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
2081         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
2082         mem = n->in(i);
2083       }
2084       i++;
2085       stack.set_index(i);
2086       if (mem == nullptr) {
2087         continue;
2088       }
2089       for (;;) {
2090         if (visited.test_set(mem->_idx) || mem->is_Start()) {
2091           break;
2092         }
2093         if (mem->is_Phi()) {
2094           stack.push(mem, 2);
2095           mem = mem->in(1);
2096         } else if (mem->is_Proj()) {
2097           stack.push(mem, mem->req());
2098           mem = mem->in(0);
2099         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
2100           mem = mem->in(TypeFunc::Memory);
2101         } else if (mem->is_MergeMem()) {
2102           MergeMemNode* mm = mem->as_MergeMem();
2103           mem = mm->memory_at(_alias);
2104         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
2105           assert(_alias == Compile::AliasIdxRaw, "");
2106           stack.push(mem, mem->req());
2107           mem = mem->in(MemNode::Memory);
2108         } else {
2109 #ifdef ASSERT
2110           mem->dump();
2111 #endif
2112           ShouldNotReachHere();
2113         }
2114       }
2115     } else {
2116       if (n->is_Phi()) {
2117         // Nothing
2118       } else if (!n->is_Root()) {
2119         Node* c = get_ctrl(n);
2120         _memory_nodes.map(c->_idx, n);
2121       }
2122       stack.pop();
2123     }
2124   } while(stack.is_nonempty());
2125 
2126   // Iterate over CFG nodes in reverse post order (RPO) and propagate the memory state to
2127   // compute the memory state at regions, creating new phis if needed.
2128   Node_List rpo_list;
2129   visited.clear();
2130   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
2131   Node* root = rpo_list.pop();
2132   assert(root == _phase->C->root(), "");
2133 
2134   const bool trace = false;
2135 #ifdef ASSERT
2136   if (trace) {
2137     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2138       Node* c = rpo_list.at(i);
2139       if (_memory_nodes[c->_idx] != nullptr) {
2140         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
2141       }
2142     }
2143   }
2144 #endif
2145   uint last = _phase->C->unique();
2146 
2147 #ifdef ASSERT
2148   uint16_t max_depth = 0;
2149   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
2150     IdealLoopTree* lpt = iter.current();
2151     max_depth = MAX2(max_depth, lpt->_nest);
2152   }
2153 #endif
2154 
2155   bool progress = true;
2156   int iteration = 0;
2157   Node_List dead_phis;
2158   while (progress) {
2159     progress = false;
2160     iteration++;
2161     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2162     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
2163 
2164     for (int i = rpo_list.size() - 1; i >= 0; i--) {
2165       Node* c = rpo_list.at(i);
2166 
2167       Node* prev_mem = _memory_nodes[c->_idx];
2168       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2169         Node* prev_region = regions[c->_idx];
2170         Node* unique = nullptr;
2171         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
2172           Node* m = _memory_nodes[c->in(j)->_idx];
2173           assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
2174           if (m != nullptr) {
2175             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
2176               assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
2177               // continue
2178             } else if (unique == nullptr) {
2179               unique = m;
2180             } else if (m == unique) {
2181               // continue
2182             } else {
2183               unique = NodeSentinel;
2184             }
2185           }
2186         }
2187         assert(unique != nullptr, "empty phi???");
2188         if (unique != NodeSentinel) {
2189           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
2190             dead_phis.push(prev_region);
2191           }
2192           regions.map(c->_idx, unique);
2193         } else {
2194           Node* phi = nullptr;
2195           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
2196             phi = prev_region;
2197             for (uint k = 1; k < c->req(); k++) {
2198               Node* m = _memory_nodes[c->in(k)->_idx];
2199               assert(m != nullptr, "expect memory state");
2200               phi->set_req(k, m);
2201             }
2202           } else {
2203             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
2204               Node* u = c->fast_out(j);
2205               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2206                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
2207                 phi = u;
2208                 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
2209                   Node* m = _memory_nodes[c->in(k)->_idx];
2210                   assert(m != nullptr, "expect memory state");
2211                   if (u->in(k) != m) {
2212                     phi = NodeSentinel;
2213                   }
2214                 }
2215               }
2216             }
2217             if (phi == NodeSentinel) {
2218               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
2219               for (uint k = 1; k < c->req(); k++) {
2220                 Node* m = _memory_nodes[c->in(k)->_idx];
2221                 assert(m != nullptr, "expect memory state");
2222                 phi->init_req(k, m);
2223               }
2224             }
2225           }
2226           if (phi != nullptr) {
2227             regions.map(c->_idx, phi);
2228           } else {
2229             assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2230           }
2231         }
2232         Node* current_region = regions[c->_idx];
2233         if (current_region != prev_region) {
2234           progress = true;
2235           if (prev_region == prev_mem) {
2236             _memory_nodes.map(c->_idx, current_region);
2237           }
2238         }
2239       } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
2240         Node* m = _memory_nodes[_phase->idom(c)->_idx];
2241         assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
2242         if (m != prev_mem) {
2243           _memory_nodes.map(c->_idx, m);
2244           progress = true;
2245         }
2246       }
2247 #ifdef ASSERT
2248       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
2249 #endif
2250     }
2251   }
2252 
2253   // Replace existing phi with computed memory state for that region
2254   // if different (could be a new phi or a dominating memory node if
2255   // that phi was found to be useless).
2256   while (dead_phis.size() > 0) {
2257     Node* n = dead_phis.pop();
2258     n->replace_by(_phase->C->top());
2259     n->destruct(&_phase->igvn());
2260   }
2261   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2262     Node* c = rpo_list.at(i);
2263     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2264       Node* n = regions[c->_idx];
2265       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2266       if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
2267         _phase->register_new_node(n, c);
2268       }
2269     }
2270   }
2271   for (int i = rpo_list.size() - 1; i >= 0; i--) {
2272     Node* c = rpo_list.at(i);
2273     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
2274       Node* n = regions[c->_idx];
2275       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
2276       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
2277         Node* u = c->fast_out(i);
2278         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
2279             u != n) {
2280           assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
2281           if (u->adr_type() == TypePtr::BOTTOM) {
2282             fix_memory_uses(u, n, n, c);
2283           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2284             _phase->lazy_replace(u, n);
2285             --i; --imax;
2286           }
2287         }
2288       }
2289     }
2290   }
2291 }
2292 
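     // Find the raw memory state reaching the head of an infinite loop (entered through a NeverBranch): use a
     // memory phi on the loop head for this alias (or bottom) when there is one, otherwise walk up from the
     // head's inputs to a non-leaf safepoint and take its memory input.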
2293 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
2294   Node* mem = nullptr;
2295   Node* head = in->in(0);
2296   assert(head->is_Region(), "unexpected infinite loop graph shape");
2297 
2298   Node* phi_mem = nullptr;
2299   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
2300     Node* u = head->fast_out(j);
2301     if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
2302       if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2303         assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
2304         phi_mem = u;
2305       } else if (u->adr_type() == TypePtr::BOTTOM) {
2306         assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2307         if (phi_mem == nullptr) {
2308           phi_mem = u;
2309         }
2310       }
2311     }
2312   }
2313   if (phi_mem == nullptr) {
2314     ResourceMark rm;
2315     Node_Stack stack(0);
2316     stack.push(head, 1);
2317     do {
2318       Node* n = stack.node();
2319       uint i = stack.index();
2320       if (i >= n->req()) {
2321         stack.pop();
2322       } else {
2323         stack.set_index(i + 1);
2324         Node* c = n->in(i);
2325         assert(c != head, "should have found a safepoint on the way");
2326         if (stack.size() != 1 || _phase->is_dominator(head, c)) {
2327           for (;;) {
2328             if (c->is_Region()) {
2329               stack.push(c, 1);
2330               break;
2331             } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
2332               Node* m = c->in(TypeFunc::Memory);
2333               if (m->is_MergeMem()) {
2334                 m = m->as_MergeMem()->memory_at(_alias);
2335               }
2336               assert(mem == nullptr || mem == m, "several memory states");
2337               mem = m;
2338               break;
2339             } else {
2340               assert(c != c->in(0), "");
2341               c = c->in(0);
2342             }
2343           }
2344         }
2345       }
2346     } while (stack.size() > 0);
2347     assert(mem != nullptr, "should have found safepoint");
2348   } else {
2349     mem = phi_mem;
2350   }
2351   return mem;
2352 }
2353 
2354 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2355   Node* c = _phase->get_ctrl(n);
2356   if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
2357     assert(c == n->in(0), "");
2358     CallNode* call = c->as_Call();
2359     CallProjections projs;
2360     call->extract_projections(&projs, true, false);
2361     if (projs.catchall_memproj != nullptr) {
2362       if (projs.fallthrough_memproj == n) {
2363         c = projs.fallthrough_catchproj;
2364       } else {
2365         assert(projs.catchall_memproj == n, "");
2366         c = projs.catchall_catchproj;
2367       }
2368     }
2369   }
2370   return c;
2371 }
2372 
2373 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2374   if (_phase->has_ctrl(n))
2375     return get_ctrl(n);
2376   else {
2377     assert (n->is_CFG(), "must be a CFG node");
2378     return n;
2379   }
2380 }
2381 
2382 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2383   return m != nullptr && get_ctrl(m) == c;
2384 }
2385 
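     // Return, roughly, the memory state for this alias that is live at ctrl: start from the state recorded
     // for ctrl and walk up the dominator tree until a state defined at the control we reached; when n is
     // given, additionally step back over same-control memory nodes until one that n can legally depend on.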
2386 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2387   assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
2388   assert(!ctrl->is_Call() || ctrl == n, "projection expected");
2389 #ifdef ASSERT
2390   if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
2391       (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
2392     CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
2393     int mems = 0;
2394     for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
2395       Node* u = call->fast_out(i);
2396       if (u->bottom_type() == Type::MEMORY) {
2397         mems++;
2398       }
2399     }
2400     assert(mems <= 1, "No node right after call if multiple mem projections");
2401   }
2402 #endif
2403   Node* mem = _memory_nodes[ctrl->_idx];
2404   Node* c = ctrl;
2405   while (!mem_is_valid(mem, c) &&
2406          (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2407     c = _phase->idom(c);
2408     mem = _memory_nodes[c->_idx];
2409   }
2410   if (n != nullptr && mem_is_valid(mem, c)) {
2411     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2412       mem = next_mem(mem, _alias);
2413     }
2414     if (mem->is_MergeMem()) {
2415       mem = mem->as_MergeMem()->memory_at(_alias);
2416     }
2417     if (!mem_is_valid(mem, c)) {
2418       do {
2419         c = _phase->idom(c);
2420         mem = _memory_nodes[c->_idx];
2421       } while (!mem_is_valid(mem, c) &&
2422                (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2423     }
2424   }
2425   assert(mem->bottom_type() == Type::MEMORY, "");
2426   return mem;
2427 }
2428 
2429 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2430   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2431     Node* use = region->fast_out(i);
2432     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2433         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2434       return true;
2435     }
2436   }
2437   return false;
2438 }
2439 
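     // Rewire the raw memory graph after new_mem was created at new_ctrl. If the memory state at ctrl differs
     // from mem, splice new_mem into the existing memory chain; otherwise propagate the new state down the CFG,
     // creating memory phis at merge points as needed. In both cases, loads, stores, MergeMems and memory phis
     // that still reference the old state are then re-pointed to the state found at their control.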
2440 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2441   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2442   const bool trace = false;
2443   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2444   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2445   GrowableArray<Node*> phis;
2446   if (mem_for_ctrl != mem) {
2447     Node* old = mem_for_ctrl;
2448     Node* prev = nullptr;
2449     while (old != mem) {
2450       prev = old;
2451       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2452         assert(_alias == Compile::AliasIdxRaw, "");
2453         old = old->in(MemNode::Memory);
2454       } else if (old->Opcode() == Op_SCMemProj) {
2455         assert(_alias == Compile::AliasIdxRaw, "");
2456         old = old->in(0);
2457       } else {
2458         ShouldNotReachHere();
2459       }
2460     }
2461     assert(prev != nullptr, "");
2462     if (new_ctrl != ctrl) {
2463       _memory_nodes.map(ctrl->_idx, mem);
2464       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2465     }
2466     uint input = (uint)MemNode::Memory;
2467     _phase->igvn().replace_input_of(prev, input, new_mem);
2468   } else {
2469     uses.clear();
2470     _memory_nodes.map(new_ctrl->_idx, new_mem);
2471     uses.push(new_ctrl);
2472     for(uint next = 0; next < uses.size(); next++ ) {
2473       Node *n = uses.at(next);
2474       assert(n->is_CFG(), "");
2475       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2476       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2477         Node* u = n->fast_out(i);
2478         if (!u->is_Root() && u->is_CFG() && u != n) {
2479           Node* m = _memory_nodes[u->_idx];
2480           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2481               !has_mem_phi(u) &&
2482               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2483             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2484             DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
2485 
2486             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2487               bool push = true;
2488               bool create_phi = true;
2489               if (_phase->is_dominator(new_ctrl, u)) {
2490                 create_phi = false;
2491               }
2492               if (create_phi) {
2493                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2494                 _phase->register_new_node(phi, u);
2495                 phis.push(phi);
2496                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2497                 if (!mem_is_valid(m, u)) {
2498                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2499                   _memory_nodes.map(u->_idx, phi);
2500                 } else {
2501                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2502                   for (;;) {
2503                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2504                     Node* next = nullptr;
2505                     if (m->is_Proj()) {
2506                       next = m->in(0);
2507                     } else {
2508                       assert(m->is_Mem() || m->is_LoadStore(), "");
2509                       assert(_alias == Compile::AliasIdxRaw, "");
2510                       next = m->in(MemNode::Memory);
2511                     }
2512                     if (_phase->get_ctrl(next) != u) {
2513                       break;
2514                     }
2515                     if (next->is_MergeMem()) {
2516                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2517                       break;
2518                     }
2519                     if (next->is_Phi()) {
2520                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2521                       break;
2522                     }
2523                     m = next;
2524                   }
2525 
2526                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2527                   assert(m->is_Mem() || m->is_LoadStore(), "");
2528                   uint input = (uint)MemNode::Memory;
2529                   _phase->igvn().replace_input_of(m, input, phi);
2530                   push = false;
2531                 }
2532               } else {
2533                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2534               }
2535               if (push) {
2536                 uses.push(u);
2537               }
2538             }
2539           } else if (!mem_is_valid(m, u) &&
2540                      !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
2541             uses.push(u);
2542           }
2543         }
2544       }
2545     }
2546     for (int i = 0; i < phis.length(); i++) {
2547       Node* n = phis.at(i);
2548       Node* r = n->in(0);
2549       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2550       for (uint j = 1; j < n->req(); j++) {
2551         Node* m = find_mem(r->in(j), nullptr);
2552         _phase->igvn().replace_input_of(n, j, m);
2553         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2554       }
2555     }
2556   }
2557   uint last = _phase->C->unique();
2558   MergeMemNode* mm = nullptr;
2559   int alias = _alias;
2560   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2561   // Process loads first so no anti-dependency is missed: if the memory
2562   // edge of a store were updated before a load is processed, the
2563   // anti-dependency between them could be lost.
2564   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2565     Node* u = mem->out(i);
2566     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2567       Node* m = find_mem(_phase->get_ctrl(u), u);
2568       if (m != mem) {
2569         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2570         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2571         --i;
2572       }
2573     }
2574   }
2575   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2576     Node* u = mem->out(i);
2577     if (u->_idx < last) {
2578       if (u->is_Mem()) {
2579         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2580           Node* m = find_mem(_phase->get_ctrl(u), u);
2581           if (m != mem) {
2582             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2583             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2584             --i;
2585           }
2586         }
2587       } else if (u->is_MergeMem()) {
2588         MergeMemNode* u_mm = u->as_MergeMem();
2589         if (u_mm->memory_at(alias) == mem) {
2590           MergeMemNode* newmm = nullptr;
2591           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2592             Node* uu = u->fast_out(j);
2593             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2594             if (uu->is_Phi()) {
2595               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2596               Node* region = uu->in(0);
2597               int nb = 0;
2598               for (uint k = 1; k < uu->req(); k++) {
2599                 if (uu->in(k) == u) {
2600                   Node* m = find_mem(region->in(k), nullptr);
2601                   if (m != mem) {
2602                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2603                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2604                     if (newmm != u) {
2605                       _phase->igvn().replace_input_of(uu, k, newmm);
2606                       nb++;
2607                       --jmax;
2608                     }
2609                   }
2610                 }
2611               }
2612               if (nb > 0) {
2613                 --j;
2614               }
2615             } else {
2616               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2617               if (m != mem) {
2618                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2619                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2620                 if (newmm != u) {
2621                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2622                   --j, --jmax;
2623                 }
2624               }
2625             }
2626           }
2627         }
2628       } else if (u->is_Phi()) {
2629         assert(u->bottom_type() == Type::MEMORY, "what else?");
2630         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2631           Node* region = u->in(0);
2632           bool replaced = false;
2633           for (uint j = 1; j < u->req(); j++) {
2634             if (u->in(j) == mem) {
2635               Node* m = find_mem(region->in(j), nullptr);
2636               Node* nnew = m;
2637               if (m != mem) {
2638                 if (u->adr_type() == TypePtr::BOTTOM) {
2639                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2640                   nnew = mm;
2641                 }
2642                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2643                 _phase->igvn().replace_input_of(u, j, nnew);
2644                 replaced = true;
2645               }
2646             }
2647           }
2648           if (replaced) {
2649             --i;
2650           }
2651         }
2652       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2653                  u->adr_type() == nullptr) {
2654         assert(u->adr_type() != nullptr ||
2655                u->Opcode() == Op_Rethrow ||
2656                u->Opcode() == Op_Return ||
2657                u->Opcode() == Op_SafePoint ||
2658                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2659                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2660                u->Opcode() == Op_CallLeaf, "");
2661         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2662         if (m != mem) {
2663           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2664           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2665           --i;
2666         }
2667       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2668         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2669         if (m != mem) {
2670           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2671           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2672           --i;
2673         }
2674       } else if (u->adr_type() != TypePtr::BOTTOM &&
2675                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2676         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2677         assert(m != mem, "");
2678         // u is on the wrong slice...
2679         assert(u->is_ClearArray(), "");
2680         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2681         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2682         --i;
2683       }
2684     }
2685   }
2686 #ifdef ASSERT
2687   assert(new_mem->outcnt() > 0, "");
2688   for (int i = 0; i < phis.length(); i++) {
2689     Node* n = phis.at(i);
2690     assert(n->outcnt() > 0, "new phi must have uses now");
2691   }
2692 #endif
2693 }
2694 
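// Record which memory state is current at the original control and at the newly
// created control, so that later find_mem() lookups resolve against the right
// node for each of them.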
2695 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2696   if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2697     _memory_nodes.map(ctrl->_idx, mem);
2698     _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2699   }
2700 }
2701 
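// Build a MergeMem identical to the memory state 'mem' except that the slice
// tracked by this fixer (_alias) is replaced with 'rep_proj'; the new node is
// registered at 'rep_ctrl'.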
2702 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2703   MergeMemNode* mm = MergeMemNode::make(mem);
2704   mm->set_memory_at(_alias, rep_proj);
2705   _phase->register_new_node(mm, rep_ctrl);
2706   return mm;
2707 }
2708 
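// Return a MergeMem equivalent to 'u' but with the _alias slice fed by
// 'rep_proj'. A single-use MergeMem is updated in place; otherwise a fresh
// MergeMem is built input by input so the caller's DUIterator stays valid.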
2709 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2710   MergeMemNode* newmm = nullptr;
2711   MergeMemNode* u_mm = u->as_MergeMem();
2712   Node* c = _phase->get_ctrl(u);
2713   if (_phase->is_dominator(c, rep_ctrl)) {
2714     c = rep_ctrl;
2715   } else {
2716     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2717   }
2718   if (u->outcnt() == 1) {
2719     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2720       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2721       --i;
2722     } else {
2723       _phase->igvn().rehash_node_delayed(u);
2724       u_mm->set_memory_at(_alias, rep_proj);
2725     }
2726     newmm = u_mm;
2727     _phase->set_ctrl_and_loop(u, c);
2728   } else {
2729     // We can't simply clone u and then change one of its inputs, because
2730     // that would add and then remove an edge, which messes with the
2731     // DUIterator.
2732     newmm = MergeMemNode::make(u_mm->base_memory());
2733     for (uint j = 0; j < u->req(); j++) {
2734       if (j < newmm->req()) {
2735         if (j == (uint)_alias) {
2736           newmm->set_req(j, rep_proj);
2737         } else if (newmm->in(j) != u->in(j)) {
2738           newmm->set_req(j, u->in(j));
2739         }
2740       } else if (j == (uint)_alias) {
2741         newmm->add_req(rep_proj);
2742       } else {
2743         newmm->add_req(u->in(j));
2744       }
2745     }
2746     if ((uint)_alias >= u->req()) {
2747       newmm->set_memory_at(_alias, rep_proj);
2748     }
2749     _phase->register_new_node(newmm, c);
2750   }
2751   return newmm;
2752 }
2753 
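// A memory Phi is of interest to this fixer if it is on our alias slice, or if
// it is a bottom (all slices) memory Phi and no sibling Phi on the same region
// already covers our slice.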
2754 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2755   if (phi->adr_type() == TypePtr::BOTTOM) {
2756     Node* region = phi->in(0);
2757     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2758       Node* uu = region->fast_out(j);
2759       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2760         return false;
2761       }
2762     }
2763     return true;
2764   }
2765   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2766 }
2767 
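// Redirect uses of the memory state 'mem' that are dominated by 'rep_ctrl' to
// the new memory state 'rep_proj' produced by 'replacement', cloning MergeMems
// or wrapping 'rep_proj' in a new MergeMem where a wide memory state is needed.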
2768 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2769   uint last = _phase->C->unique();
2770   MergeMemNode* mm = nullptr;
2771   assert(mem->bottom_type() == Type::MEMORY, "");
2772   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2773     Node* u = mem->out(i);
2774     if (u != replacement && u->_idx < last) {
2775       if (u->is_MergeMem()) {
2776         MergeMemNode* u_mm = u->as_MergeMem();
2777         if (u_mm->memory_at(_alias) == mem) {
2778           MergeMemNode* newmm = nullptr;
2779           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2780             Node* uu = u->fast_out(j);
2781             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2782             if (uu->is_Phi()) {
2783               if (should_process_phi(uu)) {
2784                 Node* region = uu->in(0);
2785                 int nb = 0;
2786                 for (uint k = 1; k < uu->req(); k++) {
2787                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2788                     if (newmm == nullptr) {
2789                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2790                     }
2791                     if (newmm != u) {
2792                       _phase->igvn().replace_input_of(uu, k, newmm);
2793                       nb++;
2794                       --jmax;
2795                     }
2796                   }
2797                 }
2798                 if (nb > 0) {
2799                   --j;
2800                 }
2801               }
2802             } else {
2803               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2804                 if (newmm == nullptr) {
2805                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2806                 }
2807                 if (newmm != u) {
2808                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2809                   --j, --jmax;
2810                 }
2811               }
2812             }
2813           }
2814         }
2815       } else if (u->is_Phi()) {
2816         assert(u->bottom_type() == Type::MEMORY, "what else?");
2817         Node* region = u->in(0);
2818         if (should_process_phi(u)) {
2819           bool replaced = false;
2820           for (uint j = 1; j < u->req(); j++) {
2821             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2822               Node* nnew = rep_proj;
2823               if (u->adr_type() == TypePtr::BOTTOM) {
2824                 if (mm == nullptr) {
2825                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2826                 }
2827                 nnew = mm;
2828               }
2829               _phase->igvn().replace_input_of(u, j, nnew);
2830               replaced = true;
2831             }
2832           }
2833           if (replaced) {
2834             --i;
2835           }
2836 
2837         }
2838       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2839                  u->adr_type() == nullptr) {
2840         assert(u->adr_type() != nullptr ||
2841                u->Opcode() == Op_Rethrow ||
2842                u->Opcode() == Op_Return ||
2843                u->Opcode() == Op_SafePoint ||
2844                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2845                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2846                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2847         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2848           if (mm == nullptr) {
2849             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2850           }
2851           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2852           --i;
2853         }
2854       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2855         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2856           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2857           --i;
2858         }
2859       }
2860     }
2861   }
2862 }
2863 
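// A load reference barrier ensures the oop it produces refers to the canonical
// (to-space) copy of the object. New barrier nodes register themselves with the
// barrier set state so that expansion can find them later.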
2864 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
2865   : Node(ctrl, obj), _decorators(decorators) {
2866   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2867 }
2868 
2869 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
2870   return _decorators;
2871 }
2872 
2873 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2874   return sizeof(*this);
2875 }
2876 
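// Only reference-strength and IN_NATIVE decorators participate in hashing and
// comparison (see hash() and cmp() below), so barriers that differ only in
// other decorators can be commoned by GVN.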
2877 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2878   return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2879 }
2880 
2881 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2882   uint hash = Node::hash();
2883   hash += mask_decorators(_decorators);
2884   return hash;
2885 }
2886 
2887 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
2888   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2889          mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2890 }
2891 
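// Weak and phantom accesses may observe a reference that the barrier turns into
// null, so their result type must include NULL_PTR; strong accesses keep the
// input type unchanged.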
2892 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2893   if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2894     return Type::TOP;
2895   }
2896   const Type* t = in(ValueIn)->bottom_type();
2897   if (t == TypePtr::NULL_PTR) {
2898     return t;
2899   }
2900 
2901   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2902     return t;
2903   }
2904 
2905   return t->meet(TypePtr::NULL_PTR);
2906 }
2907 
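// The type computed during GVN follows the same rules as bottom_type() above.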
2908 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2909   // If the input value is TOP, the result is TOP.
2910   const Type* t2 = phase->type(in(ValueIn));
2911   if (t2 == Type::TOP) return Type::TOP;
2912 
2913   if (t2 == TypePtr::NULL_PTR) {
2914     return t2;
2915   }
2916 
2917   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2918     return t2;
2919   }
2920 
2921   return t2->meet(TypePtr::NULL_PTR);
2922 }
2923 
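// The barrier is the identity on its input when needs_barrier() can prove the
// value never requires one (nulls, constants, freshly allocated objects, ...).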
2924 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2925   Node* value = in(ValueIn);
2926   if (!needs_barrier(phase, value)) {
2927     return value;
2928   }
2929   return this;
2930 }
2931 
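// Conservative analysis of the nodes producing the value: return false only for
// shapes that are known not to require a barrier (allocations, call results,
// nulls, constants, results of other barriers, incoming parameters, ...);
// anything unrecognized keeps the barrier.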
2932 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2933   Unique_Node_List visited;
2934   return needs_barrier_impl(phase, n, visited);
2935 }
2936 
2937 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2938   if (n == nullptr) return false;
2939   if (visited.member(n)) {
2940     return false; // Been there.
2941   }
2942   visited.push(n);
2943 
2944   if (n->is_Allocate()) {
2945     // tty->print_cr("optimize barrier on alloc");
2946     return false;
2947   }
2948   if (n->is_Call()) {
2949     // tty->print_cr("optimize barrier on call");
2950     return false;
2951   }
2952 
2953   const Type* type = phase->type(n);
2954   if (type == Type::TOP) {
2955     return false;
2956   }
2957   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2958     // tty->print_cr("optimize barrier on null");
2959     return false;
2960   }
2961   if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
2962     // tty->print_cr("optimize barrier on constant");
2963     return false;
2964   }
2965 
2966   switch (n->Opcode()) {
2967     case Op_AddP:
2968       return true; // TODO: Can refine?
2969     case Op_LoadP:
2970     case Op_ShenandoahCompareAndExchangeN:
2971     case Op_ShenandoahCompareAndExchangeP:
2972     case Op_CompareAndExchangeN:
2973     case Op_CompareAndExchangeP:
2974     case Op_GetAndSetN:
2975     case Op_GetAndSetP:
2976       return true;
2977     case Op_Phi: {
2978       for (uint i = 1; i < n->req(); i++) {
2979         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2980       }
2981       return false;
2982     }
2983     case Op_CheckCastPP:
2984     case Op_CastPP:
2985       return needs_barrier_impl(phase, n->in(1), visited);
2986     case Op_Proj:
2987       return needs_barrier_impl(phase, n->in(0), visited);
2988     case Op_ShenandoahLoadReferenceBarrier:
2989       // tty->print_cr("optimize barrier on barrier");
2990       return false;
2991     case Op_Parm:
2992       // tty->print_cr("optimize barrier on input arg");
2993       return false;
2994     case Op_DecodeN:
2995     case Op_EncodeP:
2996       return needs_barrier_impl(phase, n->in(1), visited);
2997     case Op_LoadN:
2998       return true;
2999     case Op_CMoveN:
3000     case Op_CMoveP:
3001       return needs_barrier_impl(phase, n->in(2), visited) ||
3002              needs_barrier_impl(phase, n->in(3), visited);
3003     case Op_ShenandoahIUBarrier:
3004       return needs_barrier_impl(phase, n->in(1), visited);
3005     case Op_CreateEx:
3006       return false;
3007     default:
3008       break;
3009   }
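  // Unrecognized producer: dump the context and fail in debug builds; release
  // builds conservatively keep the barrier.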
3010 #ifdef ASSERT
3011   tty->print("need barrier on?: ");
3012   tty->print_cr("ins:");
3013   n->dump(2);
3014   tty->print_cr("outs:");
3015   n->dump(-2);
3016   ShouldNotReachHere();
3017 #endif
3018   return true;
3019 }
--- EOF ---