/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/castnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/node.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"

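// Per-compilation state: the list of LoadBarrierNodes created during parsing
// and optimization that still need to be macro expanded.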
ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
  : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {}

int ZBarrierSetC2State::load_barrier_count() const {
  return _load_barrier_nodes->length();
}

void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
  assert(!_load_barrier_nodes->contains(n), "duplicate entry in expand list");
  _load_barrier_nodes->append(n);
}

void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
  // This function may be called twice for a node, so check
  // that the node is in the array before attempting to remove it.
  if (_load_barrier_nodes->contains(n)) {
    _load_barrier_nodes->remove(n);
  }
}

LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
  return _load_barrier_nodes->at(idx);
}

void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new(comp_arena) ZBarrierSetC2State(comp_arena);
}

ZBarrierSetC2State* ZBarrierSetC2::state() const {
  return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
  return node->is_LoadBarrier();
}

void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->is_LoadBarrier()) {
    state()->add_load_barrier_node(node->as_LoadBarrier());
  }
}

void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->is_LoadBarrier()) {
    state()->remove_load_barrier_node(node->as_LoadBarrier());
  }
}

void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
  // Remove useless LoadBarrier nodes
  ZBarrierSetC2State* s = state();
  for (int i = s->load_barrier_count()-1; i >= 0; i--) {
    LoadBarrierNode* n = s->load_barrier_node(i);
    if (!useful.member(n)) {
      unregister_potential_barrier_node(n);
    }
  }
}

void ZBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
  if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
    worklist.push(node);
  }
}

void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
  // Look for dominating barriers on the same address only once all
  // other loop opts are over: loop opts may cause a safepoint to be
  // inserted between a barrier and its dominating barrier.
  Compile* C = Compile::current();
  ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
  ZBarrierSetC2State* s = bs->state();
  if (s->load_barrier_count() >= 2) {
    Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
    PhaseIdealLoop ideal_loop(igvn, LoopOptsZgcLastRound);
    if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
  }
}

void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
  // Permanent temporary workaround:
  // Load barriers may have non-obvious dead uses keeping them alive during parsing. The use is
  // removed by RemoveUseless (after parsing, before optimize), but the barriers won't be added to
  // the worklist. Unless we add them explicitly, they are not guaranteed to end up there.
  ZBarrierSetC2State* s = state();

  for (int i = 0; i < s->load_barrier_count(); i++) {
    LoadBarrierNode* n = s->load_barrier_node(i);
    worklist->push(n);
  }
}

const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
  const Type** fields;

  // Create input types (domain)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // Create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

// == LoadBarrierNode ==

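// A LoadBarrierNode represents a ZGC load barrier in the ideal graph. Its inputs are
// Control, raw Memory, the loaded Oop, the Address it was loaded from, and an optional
// Similar edge pointing at a dominating barrier on the same address. It produces Control
// and Oop projections and is macro expanded late in the compilation.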
LoadBarrierNode::LoadBarrierNode(Compile* C,
                                 Node* c,
                                 Node* mem,
                                 Node* val,
                                 Node* adr,
                                 bool weak,
                                 bool writeback,
                                 bool oop_reload_allowed) :
    MultiNode(Number_of_Inputs),
    _weak(weak),
    _writeback(writeback),
    _oop_reload_allowed(oop_reload_allowed) {
  init_req(Control, c);
  init_req(Memory, mem);
  init_req(Oop, val);
  init_req(Address, adr);
  init_req(Similar, C->top());

  init_class_id(Class_LoadBarrier);
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->register_potential_barrier_node(this);
}

const Type *LoadBarrierNode::bottom_type() const {
  const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
  Node* in_oop = in(Oop);
  floadbarrier[Control] = Type::CONTROL;
  floadbarrier[Memory] = Type::MEMORY;
  floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
  const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
  const Type* val_t = phase->type(in(Oop));
  floadbarrier[Control] = Type::CONTROL;
  floadbarrier[Memory] = Type::MEMORY;
  floadbarrier[Oop] = val_t;
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
  if (phase != NULL) {
    return phase->is_dominator(d, n);
  }

  for (int i = 0; i < 10 && n != NULL; i++) {
    n = IfNode::up_one_dom(n, linear_only);
    if (n == d) {
      return true;
    }
  }

  return false;
}

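// Look for another load barrier that makes this one redundant: first follow the Similar
// edge, then scan other barriers on the same Oop, and finally (if look_for_similar) scan
// barriers on the same Address, provided no safepoint lies between the two barriers.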
LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
  Node* val = in(LoadBarrierNode::Oop);
  if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
    LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
    assert(lb->in(Address) == in(Address), "");
    // The load barrier on the Similar edge dominates, so if it now has the same Oop input it can replace this barrier.
    if (lb->in(Oop) == in(Oop)) {
      return lb;
    }
    // Follow the chain of load barriers through their Similar edges
    while (!lb->in(Similar)->is_top()) {
      lb = lb->in(Similar)->in(0)->as_LoadBarrier();
      assert(lb->in(Address) == in(Address), "");
    }
    if (lb != in(Similar)->in(0)) {
      return lb;
    }
  }
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);
      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
        return u->as_LoadBarrier();
      }
    }
  }

  if (ZVerifyLoadBarriers || can_be_eliminated()) {
    return NULL;
  }

  if (!look_for_similar) {
    return NULL;
  }

  Node* addr = in(LoadBarrierNode::Address);
  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
    Node* u = addr->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);
      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(in(LoadBarrierNode::Control));
        bool ok = true;
        bool dom_found = false;
        for (uint next = 0; next < wq.size(); ++next) {
          Node *n = wq.at(next);
          if (n->is_top()) {
            return NULL;
          }
          assert(n->is_CFG(), "");
          if (n->is_SafePoint()) {
            ok = false;
            break;
          }
          if (n == u) {
            dom_found = true;
            continue;
          }
          if (n->is_Region()) {
            for (uint i = 1; i < n->req(); i++) {
              Node* m = n->in(i);
              if (m != NULL) {
                wq.push(m);
              }
            }
          } else {
            Node* m = n->in(0);
            if (m != NULL) {
              wq.push(m);
            }
          }
        }
        if (ok) {
          assert(dom_found, "");
          return u->as_LoadBarrier();
        }
        break;
      }
    }
  }

  return NULL;
}

void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
  // A change to this barrier may affect dominated barriers, so re-push those
  Node* val = in(LoadBarrierNode::Oop);

  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
      Node* this_ctrl = in(Control);
      Node* other_ctrl = u->in(Control);
      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
        igvn->_worklist.push(u);
      }
    }

    Node* addr = in(LoadBarrierNode::Address);
    for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
      Node* u = addr->fast_out(i);
      if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
        Node* this_ctrl = in(Control);
        Node* other_ctrl = u->in(Control);
        if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
          igvn->_worklist.push(u);
        }
      }
    }
  }
}

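// Identity: if an equivalent dominating barrier on the same Oop exists, this barrier is
// redundant and can be replaced by it.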
Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
  if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
    return this;
  }

  LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
  if (dominating_barrier != NULL) {
    assert(dominating_barrier->in(Oop) == in(Oop), "");
    return dominating_barrier;
  }

  return this;
}

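// Ideal: narrow the memory input to the raw slice, record a dominating barrier on the
// same address through the Similar edge, and eliminate the barrier when it has no true
// uses or is dominated by an equivalent barrier.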
Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }

  Node* val = in(Oop);
  Node* mem = in(Memory);
  Node* ctrl = in(Control);
  Node* adr = in(Address);
  assert(val->Opcode() != Op_LoadN, "");

  if (mem->is_MergeMem()) {
    Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    set_req(Memory, new_mem);
    if (mem->outcnt() == 0 && can_reshape) {
      phase->is_IterGVN()->_worklist.push(mem);
    }

    return this;
  }

  bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
  LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
  if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
    assert(in(Address) == dominating_barrier->in(Address), "");
    set_req(Similar, dominating_barrier->proj_out(Oop));
    return this;
  }

  bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
                   (can_reshape && (dominating_barrier != NULL || !has_true_uses()));

  if (eliminate) {
    if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      Node* out_ctrl = proj_out_or_null(Control);
      Node* out_res = proj_out_or_null(Oop);

      if (out_ctrl != NULL) {
        igvn->replace_node(out_ctrl, ctrl);
      }

      // This transformation may cause the Similar edge on the load barrier to become invalid
      fix_similar_in_uses(igvn);
      if (out_res != NULL) {
        if (dominating_barrier != NULL) {
          igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
        } else {
          igvn->replace_node(out_res, val);
        }
      }
    }

    return new ConINode(TypeInt::ZERO);
  }

  // If the Similar edge is no longer a load barrier, clear it
  Node* similar = in(Similar);
  if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
    set_req(Similar, phase->C->top());
    return this;
  }

  if (can_reshape) {
    // If this barrier is linked through the Similar edge by a
    // dominated barrier and both barriers have the same Oop field,
    // the dominated barrier can go away, so push it for reprocessing.
    // We also want to avoid having a barrier depend, through its Similar
    // edge, on another dominating barrier that itself depends on yet
    // another barrier through its Similar edge; instead, the first
    // should depend directly on the third.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    Node* out_res = proj_out(Oop);
    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
      Node* u = out_res->fast_out(i);
      if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
          (u->in(Oop) == val || !u->in(Similar)->is_top())) {
        igvn->_worklist.push(u);
      }
    }

    push_dominated_barriers(igvn);
  }

  return NULL;
}

void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res == NULL) {
    return;
  }

  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
    Node* u = out_res->fast_out(i);
    if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
      igvn->replace_input_of(u, Similar, igvn->C->top());
      --i;
      --imax;
    }
  }
}

bool LoadBarrierNode::has_true_uses() const {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res == NULL) {
    return false;
  }

  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
    Node* u = out_res->fast_out(i);
    if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
      return true;
    }
  }

  return false;
}

// == Accesses ==

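// Expand an oop CompareAndSwap into a double-checked form: if the first CAS fails, the
// field is reloaded and healed through a load barrier, and the CAS is retried once if the
// healed value still equals the expected value. Returns the boolean result phi.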
Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
  assert(!UseCompressedOops, "Not allowed");
  CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
  PhaseGVN& gvn = access.kit()->gvn();
  Compile* C = Compile::current();
  GraphKit* kit = access.kit();

  Node* in_ctrl     = cas->in(MemNode::Control);
  Node* in_mem      = cas->in(MemNode::Memory);
  Node* in_adr      = cas->in(MemNode::Address);
  Node* in_val      = cas->in(MemNode::ValueIn);
  Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);

  float likely                   = PROB_LIKELY(0.999);

  const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
  Compile::AliasType* alias_type = C->alias_type(adr_type);
  int alias_idx                  = C->get_alias_index(adr_type);

  // Outer check - true: continue, false: load and check
  Node* region   = new RegionNode(3);
  Node* phi      = new PhiNode(region, TypeInt::BOOL);
  Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);

  // Inner check - is the healed ref equal to the expected value?
  Node* region2  = new RegionNode(3);
  Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
  Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);

  // CAS node returns 0 or 1
  Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
  Node* then    = gvn.transform(new IfTrueNode(iff));
  Node* elsen   = gvn.transform(new IfFalseNode(iff));

  Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));

  kit->set_memory(scmemproj1, alias_idx);
  phi_mem->init_req(1, scmemproj1);
  phi_mem2->init_req(2, scmemproj1);

  // CAS fail - reload and heal oop
  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));

  // Check load
  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
  Node* then2   = gvn.transform(new IfTrueNode(iff2));
  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));

  // Redo CAS
  Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
  kit->set_control(elsen2);
  kit->set_memory(scmemproj2, alias_idx);

  // Merge inner flow - check if the healed oop was equal to the expected value
  region2->set_req(1, kit->control());
  region2->set_req(2, then2);
  phi2->set_req(1, cas2);
  phi2->set_req(2, kit->intcon(0));
  phi_mem2->init_req(1, scmemproj2);
  kit->set_memory(phi_mem2, alias_idx);

  // Merge outer flow - then check if the first CAS succeeded
  region->set_req(1, then);
  region->set_req(2, region2);
  phi->set_req(1, kit->intcon(1));
  phi->set_req(2, phi2);
  phi_mem->init_req(2, phi_mem2);
  kit->set_memory(phi_mem, alias_idx);

  gvn.transform(region2);
  gvn.transform(phi2);
  gvn.transform(phi_mem2);
  gvn.transform(region);
  gvn.transform(phi);
  gvn.transform(phi_mem);

  kit->set_control(region);
  kit->insert_mem_bar(Op_MemBarCPUOrder);

  return phi;
}

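// Same double-checked pattern as above for an oop CompareAndExchange, returning the oop
// result phi instead of a boolean.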
Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
  CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
  GraphKit* kit = access.kit();
  PhaseGVN& gvn = kit->gvn();
  Compile* C = Compile::current();

  Node* in_ctrl     = cmpx->in(MemNode::Control);
  Node* in_mem      = cmpx->in(MemNode::Memory);
  Node* in_adr      = cmpx->in(MemNode::Address);
  Node* in_val      = cmpx->in(MemNode::ValueIn);
  Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);

  float likely                   = PROB_LIKELY(0.999);

  const TypePtr *adr_type        = cmpx->get_ptr_type();
  Compile::AliasType* alias_type = C->alias_type(adr_type);
  int alias_idx                  = C->get_alias_index(adr_type);

  // Outer check - true: continue, false: load and check
  Node* region  = new RegionNode(3);
  Node* phi     = new PhiNode(region, adr_type);

  // Inner check - is the healed ref equal to the expected value?
  Node* region2 = new RegionNode(3);
  Node* phi2    = new PhiNode(region2, adr_type);

  // Check if cmpx succeeded
  Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
  Node* then    = gvn.transform(new IfTrueNode(iff));
  Node* elsen   = gvn.transform(new IfFalseNode(iff));

  Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
  kit->set_memory(scmemproj1, alias_idx);

  // CAS fail - reload and heal oop
  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));

  // Check load
  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
  Node* then2   = gvn.transform(new IfTrueNode(iff2));
  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));

  // Redo CAS
  Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
  kit->set_control(elsen2);
  kit->set_memory(scmemproj2, alias_idx);

  // Merge inner flow - check if the healed oop was equal to the expected value
  region2->set_req(1, kit->control());
  region2->set_req(2, then2);
  phi2->set_req(1, cmpx2);
  phi2->set_req(2, barrierdata);

  // Merge outer flow - then check if the first CAS succeeded
  region->set_req(1, then);
  region->set_req(2, region2);
  phi->set_req(1, cmpx);
  phi->set_req(2, phi2);

  gvn.transform(region2);
  gvn.transform(phi2);
  gvn.transform(region);
  gvn.transform(phi);

  kit->set_control(region);
  kit->set_memory(in_mem, alias_idx);
  kit->insert_mem_bar(Op_MemBarCPUOrder);

  return phi;
}

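// Insert a LoadBarrierNode for val loaded from adr and return its healed Oop projection,
// or val itself if GVN optimized the barrier away.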
Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
  PhaseGVN& gvn = kit->gvn();
  Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
  Node* transformed_barrier = gvn.transform(barrier);

  if (transformed_barrier->is_LoadBarrier()) {
    if (barrier == transformed_barrier) {
      kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
    }
    return gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
  } else {
    return val;
  }
}

static bool barrier_needed(C2Access access) {
  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}

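// Loads: emit a load barrier after the platform load. For unsafe accesses where the base
// may be null (i.e. the access may be a raw, off-heap access), the barrier is only applied
// under a runtime base != null check.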
Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  Node* p = BarrierSetC2::load_at_resolved(access, val_type);
  if (!barrier_needed(access)) {
    return p;
  }

  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;

  GraphKit* kit = access.kit();
  PhaseGVN& gvn = kit->gvn();
  Node* adr = access.addr().node();
  Node* heap_base_oop = access.base();
  bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
  if (unsafe) {
    if (!ZVerifyLoadBarriers) {
      p = load_barrier(kit, p, adr);
    } else {
      if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
        p = load_barrier(kit, p, adr);
      } else {
        IdealKit ideal(kit);
        IdealVariable res(ideal);
#define __ ideal.
        __ declarations_done();
        __ set(res, p);
        __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
          kit->sync_kit(ideal);
          p = load_barrier(kit, p, adr);
          __ set(res, p);
          __ sync_kit(kit);
        } __ end_if();
        kit->final_sync(ideal);
        p = __ value(res);
#undef __
      }
    }
    return p;
  } else {
    return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
  }
}

Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
  if (!barrier_needed(access)) {
    return result;
  }

  access.set_needs_pinning(false);
  return make_cmpx_loadbarrier(access);
}

Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  if (!barrier_needed(access)) {
    return result;
  }

  Node* load_store = access.raw_access();
  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);

  if (!expected_is_null) {
    // Weak and strong CAS variants currently expand to the same load barrier pattern
    access.set_needs_pinning(false);
    load_store = make_cas_loadbarrier(access);
  }

  return load_store;
}

Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
  if (!barrier_needed(access)) {
    return result;
  }

  Node* load_store = access.raw_access();
  Node* adr = access.addr().node();

  return load_barrier(access.kit(), load_store, adr, false, false, false);
}

// == Macro Expansion ==

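// Late expansion of LoadBarrierNodes. A barrier that can be eliminated only needs its load
// re-pinned below the dominating barrier; otherwise it expands either to a runtime leaf
// call (basic) or to a register-specialized LoadBarrierSlowReg stub node (optimized),
// selected by oop_reload_allowed().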
void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
  Node* in_adr  = barrier->in(LoadBarrierNode::Address);

  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);

  PhaseIterGVN &igvn = phase->igvn();

  if (ZVerifyLoadBarriers) {
    igvn.replace_node(out_res, in_val);
    igvn.replace_node(out_ctrl, in_ctrl);
    return;
  }

  if (barrier->can_be_eliminated()) {
    // Clone and pin the load for this barrier below the dominating
    // barrier: the load cannot be allowed to float above the
    // dominating barrier
    Node* load = in_val;

    if (load->is_Load()) {
      Node* new_load = load->clone();
      Node* addp = new_load->in(MemNode::Address);
      assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
      Node* cast = new CastPPNode(addp, igvn.type(addp), true);
      Node* ctrl = NULL;
      Node* similar = barrier->in(LoadBarrierNode::Similar);
      if (similar->is_Phi()) {
        // already expanded
        ctrl = similar->in(0);
      } else {
        assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
        ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
      }
      assert(ctrl != NULL, "bad control");
      cast->set_req(0, ctrl);
      igvn.transform(cast);
      new_load->set_req(MemNode::Address, cast);
      igvn.transform(new_load);

      igvn.replace_node(out_res, new_load);
      igvn.replace_node(out_ctrl, in_ctrl);
      return;
    }
    // cannot eliminate
  }

  // There are two cases that require the basic load barrier:
  // 1) When the writeback of a healed oop must be avoided (swap)
  // 2) When we must guarantee that no reload is done (swap, cas, cmpx)
  if (!barrier->is_writeback()) {
    assert(!barrier->oop_reload_allowed(), "non-writeback barriers must not allow oop reload");
  }

  if (!barrier->oop_reload_allowed()) {
    expand_loadbarrier_basic(phase, barrier);
  } else {
    expand_loadbarrier_optimized(phase, barrier);
  }
}

// Basic load barrier using conventional argument passing
void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
  PhaseIterGVN &igvn = phase->igvn();

  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
  Node* in_adr  = barrier->in(LoadBarrierNode::Address);

  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);

  float unlikely  = PROB_UNLIKELY(0.999);
  const Type* in_val_maybe_null_t = igvn.type(in_val);

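  // Fast path test: mask the loaded oop with the thread-local address bad mask. A non-zero
  // result means the oop needs healing, so branch to the runtime call on the slow path.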
  Node* jthread = igvn.transform(new ThreadLocalNode());
  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
  Node* then = igvn.transform(new IfTrueNode(iff));
  Node* elsen = igvn.transform(new IfFalseNode(iff));

  Node* result_region;
  Node* result_val;

  result_region = new RegionNode(3);
  result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);

  result_region->set_req(1, elsen);
  Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
  res->init_req(0, elsen);
  result_val->set_req(1, res);

  const TypeFunc *tf = load_barrier_Type();
  Node* call;
  if (barrier->is_weak()) {
    call = new CallLeafNode(tf,
                            ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
                            "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
                            TypeRawPtr::BOTTOM);
  } else {
    call = new CallLeafNode(tf,
                            ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
                            "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
                            TypeRawPtr::BOTTOM);
  }

  call->init_req(TypeFunc::Control, then);
  call->init_req(TypeFunc::I_O    , phase->top());
  call->init_req(TypeFunc::Memory , in_mem);
  call->init_req(TypeFunc::FramePtr, phase->top());
  call->init_req(TypeFunc::ReturnAdr, phase->top());
  call->init_req(TypeFunc::Parms+0, in_val);
  if (barrier->is_writeback()) {
    call->init_req(TypeFunc::Parms+1, in_adr);
  } else {
    // When the slow path is called with a null address, the healed oop will not be written back
    call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
  }
  call = igvn.transform(call);

  Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
  res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
  res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));

  result_region->set_req(2, ctrl);
  result_val->set_req(2, res);

  result_region = igvn.transform(result_region);
  result_val = igvn.transform(result_val);

  if (out_ctrl != NULL) {
    igvn.replace_node(out_ctrl, result_region);
  }
  igvn.replace_node(out_res, result_val);
}

// Optimized, low-spill load barrier variant using a stub specialized on the register used
void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
  PhaseIterGVN &igvn = phase->igvn();
#ifdef PRINT_NODE_TRAVERSALS
  Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
#endif

  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
  Node* in_mem = barrier->in(LoadBarrierNode::Memory);
  Node* in_val = barrier->in(LoadBarrierNode::Oop);
  Node* in_adr = barrier->in(LoadBarrierNode::Address);

  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
  Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);

  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");

#ifdef PRINT_NODE_TRAVERSALS
  tty->print("\n\n\nBefore barrier optimization:\n");
  traverse(barrier, out_ctrl, out_res, -1);

  tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
  traverse(preceding_barrier_node, out_ctrl, out_res, -1);
#endif

  float unlikely  = PROB_UNLIKELY(0.999);

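  // Same bad-mask fast path test as in the basic expansion; here the slow path is a
  // LoadBarrierSlowReg (or LoadBarrierWeakSlowReg) node that later becomes a call to a
  // stub specialized on the destination register.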
  Node* jthread = igvn.transform(new ThreadLocalNode());
  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
                                                 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
                                                 MemNode::unordered));
  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
  Node* then = igvn.transform(new IfTrueNode(iff));
  Node* elsen = igvn.transform(new IfFalseNode(iff));

  Node* slow_path_surrogate;
  if (!barrier->is_weak()) {
    slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
                                                                    (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
  } else {
    slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
                                                                        (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
  }

  Node* new_loadp = slow_path_surrogate;
  // Create the final region/phi pair to converge control/data paths to downstream code
  Node* result_region = igvn.transform(new RegionNode(3));
  result_region->set_req(1, then);
  result_region->set_req(2, elsen);

  Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
  result_phi->set_req(1, new_loadp);
  result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));

  // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
  if (out_ctrl != NULL) {
    igvn.replace_node(out_ctrl, result_region);
  }
  igvn.replace_node(out_res, result_phi);

  assert(barrier->outcnt() == 0, "LoadBarrier macro node has non-null outputs after expansion!");

#ifdef PRINT_NODE_TRAVERSALS
  tty->print("\nAfter barrier optimization:  old out_ctrl\n");
  traverse(out_ctrl, out_ctrl, out_res, -1);
  tty->print("\nAfter barrier optimization:  old out_res\n");
  traverse(out_res, out_ctrl, out_res, -1);
  tty->print("\nAfter barrier optimization:  old barrier\n");
  traverse(barrier, out_ctrl, out_res, -1);
  tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
  traverse(preceding_barrier_node, result_region, result_phi, -1);
#endif
}

bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
  Compile* C = Compile::current();
  PhaseIterGVN &igvn = macro->igvn();
  ZBarrierSetC2State* s = state();
  if (s->load_barrier_count() > 0) {
#ifdef ASSERT
    verify_gc_barriers(false);
#endif
    igvn.set_delay_transform(true);
    int skipped = 0;
    while (s->load_barrier_count() > skipped) {
      int load_barrier_count = s->load_barrier_count();
      LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
      if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
        // Node is unreachable, so don't try to expand it
        s->remove_load_barrier_node(n);
        continue;
      }
      if (!n->can_be_eliminated()) {
        skipped++;
        continue;
      }
      expand_loadbarrier_node(macro, n);
      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
      if (C->failing())  return true;
    }
    while (s->load_barrier_count() > 0) {
      int load_barrier_count = s->load_barrier_count();
      LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
      assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
      assert(!n->can_be_eliminated(), "should have been processed already");
      expand_loadbarrier_node(macro, n);
      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
      if (C->failing())  return true;
    }
    igvn.set_delay_transform(false);
    igvn.optimize();
    if (C->failing())  return true;
  }
  return false;
}

// == Loop optimization ==

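// During loop opts each load barrier is, in order, considered for: replacement by a
// dominating barrier, splitting through an Oop phi, hoisting out of a loop it does not
// depend on, and commoning with a sibling barrier on the other arm of the same If
// (see optimize_load_barrier below).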
static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
  PhaseIterGVN &igvn = phase->igvn();
  Compile* C = Compile::current();

  LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
  if (lb2 != NULL) {
    if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
      assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
      igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
      C->set_major_progress();
    } else {
      // This transformation may cause the Similar edge on dominated load barriers to become invalid
      lb->fix_similar_in_uses(&igvn);

      Node* val = lb->proj_out(LoadBarrierNode::Oop);
      assert(lb2->has_true_uses(), "");
      assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");

      phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
      phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
      igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));

      return true;
    }
  }
  return false;
}

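// Walk the raw memory graph upwards from mem until its defining control is no longer
// dominated by dom; when crossing a Phi that merges at dom, take input i.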
static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
  assert(dom->is_Region() || i == -1, "");
  Node* m = mem;
  while (phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
    if (m->is_Mem()) {
      assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
      m = m->in(MemNode::Memory);
    } else if (m->is_MergeMem()) {
      m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    } else if (m->is_Phi()) {
      if (m->in(0) == dom && i != -1) {
        m = m->in(i);
        break;
      } else {
        m = m->in(LoopNode::EntryControl);
      }
    } else if (m->is_Proj()) {
      m = m->in(0);
    } else if (m->is_SafePoint() || m->is_MemBar()) {
      m = m->in(TypeFunc::Memory);
    } else {
#ifdef ASSERT
      m->dump();
#endif
      ShouldNotReachHere();
    }
  }
  return m;
}

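// Clone lb with new Control, Memory and (optionally) Oop inputs, register the clone and
// its projections with the loop phase, and clear a Similar edge that no longer dominates
// the clone's position.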
static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
  PhaseIterGVN &igvn = phase->igvn();
  Compile* C = Compile::current();
  Node* the_clone = lb->clone();
  the_clone->set_req(LoadBarrierNode::Control, ctl);
  the_clone->set_req(LoadBarrierNode::Memory, mem);
  if (oop_in != NULL) {
    the_clone->set_req(LoadBarrierNode::Oop, oop_in);
  }

  LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
  igvn.register_new_node_with_optimizer(new_lb);
  IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
  phase->set_ctrl(new_lb, new_lb->in(0));
  phase->set_loop(new_lb, loop);
  phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
  if (!loop->_child) {
    loop->_body.push(new_lb);
  }

  Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
  igvn.register_new_node_with_optimizer(proj_ctl);
  phase->set_ctrl(proj_ctl, proj_ctl->in(0));
  phase->set_loop(proj_ctl, loop);
  phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
  if (!loop->_child) {
    loop->_body.push(proj_ctl);
  }

  Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
  phase->register_new_node(proj_oop, new_lb);

  if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
    LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
    if (!phase->is_dominator(similar, ctl)) {
      igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
    }
  }

  return new_lb;
}

static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
  PhaseIterGVN &igvn = phase->igvn();
  Node* val = lb->proj_out(LoadBarrierNode::Oop);
  igvn.replace_node(val, new_val);
  phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
  phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
}

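// If the barrier's Oop input is a phi, split the barrier through the phi so that each
// region predecessor heals its own input; the original barrier is then replaced by a phi
// of the healed values.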
static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
  PhaseIterGVN &igvn = phase->igvn();
  Compile* C = Compile::current();

  if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
    Node* oop_phi = lb->in(LoadBarrierNode::Oop);

    if (oop_phi->in(2) == oop_phi) {
      // Ignore phis with only one input
      return false;
    }

    if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
                            oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
      // This transformation may cause the Similar edge on dominated load barriers to become invalid
      lb->fix_similar_in_uses(&igvn);

      RegionNode* region = oop_phi->in(0)->as_Region();

      int backedge = LoopNode::LoopBackControl;
      if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
        Node* c = region->in(backedge)->in(0)->in(0);
        assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
        Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
        Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
        if (!phase->is_dominator(oop_c, c)) {
          return false;
        }
      }

      // If the node on the backedge above the phi is the node itself, we have a self loop.
      // Don't clone - this will be folded later.
      if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
        return false;
      }

      bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
      Node *phi = oop_phi->clone();

      for (uint i = 1; i < region->req(); i++) {
        Node* ctrl = region->in(i);
        if (ctrl != C->top()) {
          assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");

          Node* mem = lb->in(LoadBarrierNode::Memory);
          Node* m = find_dominating_memory(phase, mem, region, i);

          if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
            ctrl = ctrl->in(0)->in(0);
          } else if (region->is_Loop() && is_strip_mined) {
            // If this is a strip-mined loop, control must move above the OuterStripMinedLoop
            assert(i == LoopNode::EntryControl, "check");
            assert(ctrl->is_OuterStripMinedLoop(), "sanity");
            ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
          }

          LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
          Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);

          if (is_strip_mined && (i == LoopNode::EntryControl)) {
            assert(region->in(i)->is_OuterStripMinedLoop(), "");
            igvn.replace_input_of(region->in(i), i, out_ctrl);
            phase->set_idom(region->in(i), out_ctrl, phase->dom_depth(out_ctrl));
          } else if (ctrl == region->in(i)) {
            igvn.replace_input_of(region, i, out_ctrl);
            // Only update the idom if it is the loop entry we are updating
            // - a loop backedge doesn't change the idom
            if (region->is_Loop() && i == LoopNode::EntryControl) {
              phase->set_idom(region, out_ctrl, phase->dom_depth(out_ctrl));
            }
          } else {
            Node* iff = region->in(i)->in(0);
            igvn.replace_input_of(iff, 0, out_ctrl);
            phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
          }
          phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
        }
      }
      phase->register_new_node(phi, region);
      replace_barrier(phase, lb, phi);

      if (region->is_Loop()) {
        // A load barrier moved to the back edge of the loop may now
        // have a safepoint on the path to the barrier on its Similar
        // edge
        igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
        Node* head = region->in(LoopNode::EntryControl);
        phase->set_idom(region, head, phase->dom_depth(head)+1);
        phase->recompute_dom_depth();
        if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
          head->as_CountedLoop()->set_normal_loop();
        }
      }

      return true;
    }
  }

  return false;
}

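// Hoist a barrier whose Oop and Address inputs are loop invariant out of the loop, all the
// way above the outer strip-mined loop if one is present.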
static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
  PhaseIterGVN &igvn = phase->igvn();
  IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
  if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
    Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
    IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
    IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
    if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
      // This transformation may cause the Similar edge on dominated load barriers to become invalid
      lb->fix_similar_in_uses(&igvn);

      Node* head = lb_loop->_head;
      assert(head->is_Loop(), "");

      if (phase->is_dominator(head, oop_ctrl)) {
        assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
        assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
        return false;
      }

      if (head->is_CountedLoop()) {
        CountedLoopNode* cloop = head->as_CountedLoop();
        if (cloop->is_main_loop()) {
          cloop->set_normal_loop();
        }
        // When we are moving a barrier out of a counted loop,
        // make sure we move it all the way out of the strip-mined outer loop.
        if (cloop->is_strip_mined()) {
          head = cloop->outer_loop();
        }
      }

      Node* mem = lb->in(LoadBarrierNode::Memory);
      Node* m = find_dominating_memory(phase, mem, head, -1);

      LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);

      assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
      Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
      igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
      phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);

      replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));

      phase->recompute_dom_depth();

      return true;
    }
  }

  return false;
}

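// If this barrier and another barrier on the same Oop sit on the two arms of the same If,
// insert a single barrier above the If and let both of them use its healed result.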
static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
  PhaseIterGVN &igvn = phase->igvn();
  Node* in_val = lb->in(LoadBarrierNode::Oop);
  for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
    Node* u = in_val->fast_out(i);
    if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
      Node* this_ctrl = lb->in(LoadBarrierNode::Control);
      Node* other_ctrl = u->in(LoadBarrierNode::Control);

      Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
      bool ok = true;

      Node* proj1 = NULL;
      Node* proj2 = NULL;

      while (this_ctrl != lca && ok) {
        if (this_ctrl->in(0) != NULL &&
            this_ctrl->in(0)->is_MultiBranch()) {
          if (this_ctrl->in(0)->in(0) == lca) {
            assert(proj1 == NULL, "");
            assert(this_ctrl->is_Proj(), "");
            proj1 = this_ctrl;
          } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
            ok = false;
          }
        }
        this_ctrl = phase->idom(this_ctrl);
      }
      while (other_ctrl != lca && ok) {
        if (other_ctrl->in(0) != NULL &&
            other_ctrl->in(0)->is_MultiBranch()) {
          if (other_ctrl->in(0)->in(0) == lca) {
            assert(other_ctrl->is_Proj(), "");
            assert(proj2 == NULL, "");
            proj2 = other_ctrl;
          } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
            ok = false;
          }
        }
        other_ctrl = phase->idom(other_ctrl);
      }
      assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
      if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
        // This transformation may cause the Similar edge on dominated load barriers to become invalid
        lb->fix_similar_in_uses(&igvn);
        u->as_LoadBarrier()->fix_similar_in_uses(&igvn);

        Node* split = lca->unique_ctrl_out();
        assert(split->in(0) == lca, "");

        Node* mem = lb->in(LoadBarrierNode::Memory);
        Node* m = find_dominating_memory(phase, mem, split, -1);
        LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);

        Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
        igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
        phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);

        Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
        replace_barrier(phase, lb, proj_oop);
        replace_barrier(phase, u->as_LoadBarrier(), proj_oop);

        phase->recompute_dom_depth();

        return true;
      }
    }
  }

  return false;
}

static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
  Compile* C = Compile::current();

  if (!C->directive()->ZOptimizeLoadBarriersOption) {
    return;
  }

  if (lb->has_true_uses()) {
    if (replace_with_dominating_barrier(phase, lb, last_round)) {
      return;
    }

    if (split_barrier_thru_phi(phase, lb)) {
      return;
    }

    if (move_out_of_loop(phase, lb)) {
      return;
    }

    if (common_barriers(phase, lb)) {
      return;
    }
  }
}

void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
  if (node->is_LoadBarrier()) {
    optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
  }
}

// == Verification ==

#ifdef ASSERT

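// Verification helper: check that every use of an oop load is, possibly through
// Phi/CMove/EncodeP/DecodeN nodes, a load barrier or an SCMemProj.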
static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
  if (visited.test_set(n->_idx)) {
    return true;
  }

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (u->is_LoadBarrier()) {
    } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
      if (!look_for_barrier(u, post_parse, visited)) {
        return false;
      }
    } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
      if (!look_for_barrier(u, post_parse, visited)) {
        return false;
      }
    } else if (u->Opcode() != Op_SCMemProj) {
      tty->print("bad use"); u->dump();
      return false;
    }
  }

  return true;
}

void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
  ZBarrierSetC2State* s = state();
  Compile* C = Compile::current();
  ResourceMark rm;
  VectorSet visited(Thread::current()->resource_area());
  for (int i = 0; i < s->load_barrier_count(); i++) {
    LoadBarrierNode* n = s->load_barrier_node(i);

    // The dominating barrier on the same address (reached through the Similar
    // edge), if it exists, must not operate on the same Oop value as this
    // barrier; otherwise the value is not reloaded before it is used a
    // second time.
    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
           "broken similar edge");

    assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
           "found unneeded load barrier");

    // Several load barrier nodes chained through their Similar edges
    // would break the code that removes the barriers in final graph reshape.
    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
           "chain of Similar load barriers");

    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
      ResourceMark rm;
      Unique_Node_List wq;
      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
      wq.push(n);
      bool ok = true;
      bool dom_found = false;
      for (uint next = 0; next < wq.size(); ++next) {
        Node *n = wq.at(next);
        assert(n->is_CFG(), "");
        assert(!n->is_SafePoint(), "");

        if (n == other) {
          continue;
        }

        if (n->is_Region()) {
          for (uint i = 1; i < n->req(); i++) {
            Node* m = n->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          Node* m = n->in(0);
          if (m != NULL) {
            wq.push(m);
          }
        }
      }
    }

    if (ZVerifyLoadBarriers) {
      if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
        visited.Clear();
        bool found = look_for_barrier(n, post_parse, visited);
        if (!found) {
          n->dump(1);
          n->dump(-3);
          stringStream ss;
          C->method()->print_short_name(&ss);
          tty->print_cr("-%s-", ss.as_string());
          assert(found, "");
        }
      }
    }
  }
}

#endif