1 /*
   2  * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "code/vmreg.inline.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "gc/shared/tlab_globals.hpp"
  29 #include "opto/arraycopynode.hpp"
  30 #include "opto/block.hpp"
  31 #include "opto/convertnode.hpp"
  32 #include "opto/graphKit.hpp"
  33 #include "opto/idealKit.hpp"
  34 #include "opto/macro.hpp"
  35 #include "opto/narrowptrnode.hpp"
  36 #include "opto/output.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "utilities/macros.hpp"
  40 #include CPU_HEADER(gc/shared/barrierSetAssembler)
  41 
  42 // By default this is a no-op.
  43 void BarrierSetC2::resolve_address(C2Access& access) const { }
  44 
  45 void* C2ParseAccess::barrier_set_state() const {
  46   return _kit->barrier_set_state();
  47 }
  48 
  49 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
  50 
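// Whether this access must be bracketed by MemBarCPUOrder barriers (see
// C2AccessFence): always for atomic (read-write) accesses, and for anonymous
// (Unsafe) accesses whose alias category cannot be reliably determined.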
  51 bool C2Access::needs_cpu_membar() const {
  52   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
  53   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  54 
  55   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  56   bool in_heap   = (_decorators & IN_HEAP) != 0;
  57   bool in_native = (_decorators & IN_NATIVE) != 0;
  58   bool is_mixed  = !in_heap && !in_native;
  59 
  60   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
  61   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
  62   bool is_atomic = is_read && is_write;
  63 
  64   if (is_atomic) {
  65     // Atomics always need to be wrapped in CPU membars
  66     return true;
  67   }
  68 
  69   if (anonymous) {
  70     // We will need memory barriers unless we can determine a unique
  71     // alias category for this reference.  (Note:  If for some reason
  72     // the barriers get omitted and the unsafe reference begins to "pollute"
  73     // the alias analysis of the rest of the graph, either Compile::can_alias
  74     // or Compile::must_alias will throw a diagnostic assert.)
  75     if (is_mixed || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
  76       return true;
  77     }
  78   } else {
  79     assert(!is_mixed, "not unsafe");
  80   }
  81 
  82   return false;
  83 }
  84 
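// Shorthand for the GC-specific barrier set state of the current compilation.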
  85 static BarrierSetC2State* barrier_set_state() {
  86   return reinterpret_cast<BarrierSetC2State*>(Compile::current()->barrier_set_state());
  87 }
  88 
  89 RegMask& BarrierStubC2::live() const {
  90   return *barrier_set_state()->live(_node);
  91 }
  92 
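// A barrier stub initially preserves every register that is live across its
// node; preserve() and dont_preserve() refine that set afterwards.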
  93 BarrierStubC2::BarrierStubC2(const MachNode* node)
  94   : _node(node),
  95     _entry(),
  96     _continuation(),
  97     _preserve(live()) {}
  98 
  99 Label* BarrierStubC2::entry() {
 100   // The _entry will never be bound when in_scratch_emit_size() is true.
 101   // However, we still need to return a label that is not bound now, but
 102   // will eventually be bound. Any eventually bound label will do, as it
 103   // will only act as a placeholder, so we return the _continuation label.
 104   return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
 105 }
 106 
 107 Label* BarrierStubC2::continuation() {
 108   return &_continuation;
 109 }
 110 
 111 uint8_t BarrierStubC2::barrier_data() const {
 112   return _node->barrier_data();
 113 }
 114 
 115 void BarrierStubC2::preserve(Register r) {
 116   const VMReg vm_reg = r->as_VMReg();
 117   assert(vm_reg->is_Register(), "r must be a general-purpose register");
 118   _preserve.insert(OptoReg::as_OptoReg(vm_reg));
 119 }
 120 
 121 void BarrierStubC2::dont_preserve(Register r) {
 122   VMReg vm_reg = r->as_VMReg();
 123   assert(vm_reg->is_Register(), "r must be a general-purpose register");
 124   // Subtract the given register and all its sub-registers (e.g. {R11, R11_H}
 125   // for r11 in aarch64).
 126   do {
 127     _preserve.remove(OptoReg::as_OptoReg(vm_reg));
 128     vm_reg = vm_reg->next();
 129   } while (vm_reg->is_Register() && !vm_reg->is_concrete());
 130 }
 131 
 132 const RegMask& BarrierStubC2::preserve_set() const {
 133   return _preserve;
 134 }
 135 
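// Emit a plain store for an already-resolved access: through the GraphKit
// during parsing, or by building and transforming a StoreNode directly when
// called from the optimizer (C2OptAccess).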
 136 Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
 137   DecoratorSet decorators = access.decorators();
 138 
 139   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 140   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 141   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 142   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 143 
 144   MemNode::MemOrd mo = access.mem_node_mo();
 145 
 146   Node* store;
 147   BasicType bt = access.type();
 148   if (access.is_parse_access()) {
 149     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 150 
 151     GraphKit* kit = parse_access.kit();
 152     store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), bt,
 153                                  mo, requires_atomic_access, unaligned, mismatched,
 154                                  unsafe, access.barrier_data());
 155   } else {
 156     assert(access.is_opt_access(), "either parse or opt access");
 157     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 158     Node* ctl = opt_access.ctl();
 159     MergeMemNode* mm = opt_access.mem();
 160     PhaseGVN& gvn = opt_access.gvn();
 161     const TypePtr* adr_type = access.addr().type();
 162     int alias = gvn.C->get_alias_index(adr_type);
 163     Node* mem = mm->memory_at(alias);
 164 
 165     StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), bt, mo, requires_atomic_access);
 166     if (unaligned) {
 167       st->set_unaligned_access();
 168     }
 169     if (mismatched) {
 170       st->set_mismatched_access();
 171     }
 172     st->set_barrier_data(access.barrier_data());
 173     store = gvn.transform(st);
 174     if (store == st) {
 175       mm->set_memory_at(alias, st);
 176     }
 177   }
 178   access.set_raw_access(store);
 179 
 180   return store;
 181 }
 182 
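// Emit a plain load for an already-resolved access, honoring the memory
// ordering, alignment, control-dependency, and immutability decorators.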
 183 Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 184   DecoratorSet decorators = access.decorators();
 185 
 186   Node* adr = access.addr().node();
 187   const TypePtr* adr_type = access.addr().type();
 188 
 189   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 190   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 191   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 192   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 193   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
 194   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 195   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
 196 
 197   MemNode::MemOrd mo = access.mem_node_mo();
 198   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
 199 
 200   Node* load;
 201   if (access.is_parse_access()) {
 202     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 203     GraphKit* kit = parse_access.kit();
 204     Node* control = control_dependent ? kit->control() : nullptr;
 205 
 206     if (immutable) {
 207       Compile* C = Compile::current();
 208       Node* mem = kit->immutable_memory();
 209       load = LoadNode::make(kit->gvn(), control, mem, adr,
 210                             adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
 211                             unaligned, mismatched, unsafe, access.barrier_data());
 212       load = kit->gvn().transform(load);
 213     } else {
 214       load = kit->make_load(control, adr, val_type, access.type(), mo,
 215                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
 216                             access.barrier_data());
 217     }
 218   } else {
 219     assert(access.is_opt_access(), "either parse or opt access");
 220     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 221     Node* control = control_dependent ? opt_access.ctl() : nullptr;
 222     MergeMemNode* mm = opt_access.mem();
 223     PhaseGVN& gvn = opt_access.gvn();
 224     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
 225     load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep,
 226                           requires_atomic_access, unaligned, mismatched, unsafe, access.barrier_data());
 227     load = gvn.transform(load);
 228   }
 229   access.set_raw_access(load);
 230 
 231   return load;
 232 }
 233 
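// RAII helper that brackets an access with the memory barriers implied by its
// decorators: the constructor emits the leading barrier(s) and the destructor
// emits the trailing barrier(s), pairing them up where required.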
 234 class C2AccessFence: public StackObj {
 235   C2Access& _access;
 236   Node* _leading_membar;
 237 
 238 public:
 239   C2AccessFence(C2Access& access) :
 240     _access(access), _leading_membar(nullptr) {
 241     GraphKit* kit = nullptr;
 242     if (access.is_parse_access()) {
 243       C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 244       kit = parse_access.kit();
 245     }
 246     DecoratorSet decorators = access.decorators();
 247 
 248     bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
 249     bool is_read = (decorators & C2_READ_ACCESS) != 0;
 250     bool is_atomic = is_read && is_write;
 251 
 252     bool is_volatile = (decorators & MO_SEQ_CST) != 0;
 253     bool is_release = (decorators & MO_RELEASE) != 0;
 254 
 255     if (is_atomic) {
 256       assert(kit != nullptr, "unsupported at optimization time");
      // Memory-model-wise, a LoadStore acts like a little synchronized
      // block, so it needs barriers on each side.  These don't translate
      // into actual barriers on most machines, but we still need the rest
      // of the compiler to respect ordering.
 261       if (is_release) {
 262         _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
 263       } else if (is_volatile) {
 264         if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 265           _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
 266         } else {
 267           _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
 268         }
 269       }
 270     } else if (is_write) {
      // If the reference is volatile, prevent following memory ops from
      // floating down past the volatile write.  It also prevents commoning
      // of another volatile read.
 274       if (is_volatile || is_release) {
 275         assert(kit != nullptr, "unsupported at optimization time");
 276         _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
 277       }
 278     } else {
      // Memory barrier to prevent normal and 'unsafe' accesses from
      // bypassing each other.  Happens after null checks, so the
      // exception paths do not take memory state from the memory barrier,
      // so there are no problems making a strong assert about mixing users
      // of safe & unsafe memory.
 284       if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
 285         assert(kit != nullptr, "unsupported at optimization time");
 286         _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
 287       }
 288     }
 289 
 290     if (access.needs_cpu_membar()) {
 291       assert(kit != nullptr, "unsupported at optimization time");
 292       kit->insert_mem_bar(Op_MemBarCPUOrder);
 293     }
 294 
 295     if (is_atomic) {
 296       // 4984716: MemBars must be inserted before this
 297       //          memory node in order to avoid a false
 298       //          dependency which will confuse the scheduler.
 299       access.set_memory();
 300     }
 301   }
 302 
 303   ~C2AccessFence() {
 304     GraphKit* kit = nullptr;
 305     if (_access.is_parse_access()) {
 306       C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(_access);
 307       kit = parse_access.kit();
 308     }
 309     DecoratorSet decorators = _access.decorators();
 310 
 311     bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
 312     bool is_read = (decorators & C2_READ_ACCESS) != 0;
 313     bool is_atomic = is_read && is_write;
 314 
 315     bool is_volatile = (decorators & MO_SEQ_CST) != 0;
 316     bool is_acquire = (decorators & MO_ACQUIRE) != 0;
 317 
    // If the reference is volatile, prevent following volatile ops from
    // floating up before the volatile access.
 320     if (_access.needs_cpu_membar()) {
 321       kit->insert_mem_bar(Op_MemBarCPUOrder);
 322     }
 323 
 324     if (is_atomic) {
 325       assert(kit != nullptr, "unsupported at optimization time");
 326       if (is_acquire || is_volatile) {
 327         Node* n = _access.raw_access();
 328         Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
 329         if (_leading_membar != nullptr) {
 330           MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
 331         }
 332       }
 333     } else if (is_write) {
      // On CPUs that are not multiple-copy-atomic, the MemBarVolatile is instead
      // emitted before volatile loads (as a leading membar), so the trailing
      // barrier is only needed here on multiple-copy-atomic CPUs.
 335       if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
 336         assert(kit != nullptr, "unsupported at optimization time");
 337         Node* n = _access.raw_access();
 338         Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
 339         if (_leading_membar != nullptr) {
 340           MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
 341         }
 342       }
 343     } else {
 344       if (is_volatile || is_acquire) {
 345         assert(kit != nullptr, "unsupported at optimization time");
 346         Node* n = _access.raw_access();
 347         assert(_leading_membar == nullptr || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
 348         Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
 349         mb->as_MemBar()->set_trailing_load();
 350       }
 351     }
 352   }
 353 };
 354 
 355 Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
 356   C2AccessFence fence(access);
 357   resolve_address(access);
 358   return store_at_resolved(access, val);
 359 }
 360 
 361 Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
 362   C2AccessFence fence(access);
 363   resolve_address(access);
 364   return load_at_resolved(access, val_type);
 365 }
 366 
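// Translate the access decorators into the memory ordering carried by the
// generated load/store node.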
 367 MemNode::MemOrd C2Access::mem_node_mo() const {
 368   bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
 369   bool is_read = (_decorators & C2_READ_ACCESS) != 0;
 370   if ((_decorators & MO_SEQ_CST) != 0) {
 371     if (is_write && is_read) {
 372       // For atomic operations
 373       return MemNode::seqcst;
 374     } else if (is_write) {
 375       return MemNode::release;
 376     } else {
 377       assert(is_read, "what else?");
 378       return MemNode::acquire;
 379     }
 380   } else if ((_decorators & MO_RELEASE) != 0) {
 381     return MemNode::release;
 382   } else if ((_decorators & MO_ACQUIRE) != 0) {
 383     return MemNode::acquire;
 384   } else if (is_write) {
 385     // Volatile fields need releasing stores.
 386     // Non-volatile fields also need releasing stores if they hold an
 387     // object reference, because the object reference might point to
 388     // a freshly created object.
 389     // Conservatively release stores of object references.
 390     return StoreNode::release_if_reference(_type);
 391   } else {
 392     return MemNode::unordered;
 393   }
 394 }
 395 
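// Complete the decorator set: apply the generic decorator fixup and, for
// anonymous (Unsafe) loads, decide whether the load must stay pinned to the
// control under which it was parsed.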
 396 void C2Access::fixup_decorators() {
 397   bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
 398   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 399 
 400   bool is_read = (_decorators & C2_READ_ACCESS) != 0;
 401   bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
 402 
 403   _decorators = AccessInternal::decorator_fixup(_decorators, _type);
 404 
 405   if (is_read && !is_write && anonymous) {
    // To be valid, unsafe loads may depend on conditions other than
    // the one that guards them: pin the Load node.
 408     _decorators |= C2_CONTROL_DEPENDENT_LOAD;
 409     _decorators |= C2_UNKNOWN_CONTROL_LOAD;
 410     const TypePtr* adr_type = _addr.type();
 411     Node* adr = _addr.node();
 412     if (!needs_cpu_membar() && adr_type->isa_instptr()) {
 413       assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
 414       intptr_t offset = Type::OffsetBot;
 415       AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
 416       if (offset >= 0) {
 417         int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->instance_klass()->layout_helper());
 418         if (offset < s) {
 419           // Guaranteed to be a valid access, no need to pin it
 420           _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
 421           _decorators ^= C2_UNKNOWN_CONTROL_LOAD;
 422         }
 423       }
 424     }
 425   }
 426 }
 427 
 428 //--------------------------- atomic operations---------------------------------
 429 
 430 void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
 431   // SCMemProjNodes represent the memory state of a LoadStore. Their
 432   // main role is to prevent LoadStore nodes from being optimized away
 433   // when their results aren't used.
 434   assert(access.is_parse_access(), "entry not supported at optimization time");
 435   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 436   GraphKit* kit = parse_access.kit();
 437   Node* load_store = access.raw_access();
 438   assert(load_store != nullptr, "must pin atomic op");
 439   Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
 440   kit->set_memory(proj, access.alias_idx());
 441 }
 442 
 443 void C2AtomicParseAccess::set_memory() {
 444   Node *mem = _kit->memory(_alias_idx);
 445   _memory = mem;
 446 }
 447 
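// Build a value-returning CompareAndExchange node of the appropriate flavor
// (narrow oop, oop, or primitive) for the resolved access.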
 448 Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 449                                                    Node* new_val, const Type* value_type) const {
 450   GraphKit* kit = access.kit();
 451   MemNode::MemOrd mo = access.mem_node_mo();
 452   Node* mem = access.memory();
 453 
 454   Node* adr = access.addr().node();
 455   const TypePtr* adr_type = access.addr().type();
 456 
 457   Node* load_store = nullptr;
 458 
 459   if (access.is_oop()) {
 460 #ifdef _LP64
 461     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 462       Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
 463       Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
 464       load_store = new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo);
 465     } else
 466 #endif
 467     {
 468       load_store = new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo);
 469     }
 470   } else {
 471     switch (access.type()) {
 472       case T_BYTE: {
 473         load_store = new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
 474         break;
 475       }
 476       case T_SHORT: {
 477         load_store = new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
 478         break;
 479       }
 480       case T_INT: {
 481         load_store = new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
 482         break;
 483       }
 484       case T_LONG: {
 485         load_store = new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
 486         break;
 487       }
 488       default:
 489         ShouldNotReachHere();
 490     }
 491   }
 492 
 493   load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
 494   load_store = kit->gvn().transform(load_store);
 495 
 496   access.set_raw_access(load_store);
 497   pin_atomic_op(access);
 498 
 499 #ifdef _LP64
 500   if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
 501     return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
 502   }
 503 #endif
 504 
 505   return load_store;
 506 }
 507 
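// Build a boolean-returning (Weak)CompareAndSwap node; the weak variants are
// selected by the C2_WEAK_CMPXCHG decorator.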
 508 Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 509                                                     Node* new_val, const Type* value_type) const {
 510   GraphKit* kit = access.kit();
 511   DecoratorSet decorators = access.decorators();
 512   MemNode::MemOrd mo = access.mem_node_mo();
 513   Node* mem = access.memory();
 514   bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
 515   Node* load_store = nullptr;
 516   Node* adr = access.addr().node();
 517 
 518   if (access.is_oop()) {
 519 #ifdef _LP64
 520     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 521       Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
 522       Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
 523       if (is_weak_cas) {
 524         load_store = new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
 525       } else {
 526         load_store = new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
 527       }
 528     } else
 529 #endif
 530     {
 531       if (is_weak_cas) {
 532         load_store = new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
 533       } else {
 534         load_store = new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
 535       }
 536     }
 537   } else {
 538     switch(access.type()) {
 539       case T_BYTE: {
 540         if (is_weak_cas) {
 541           load_store = new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
 542         } else {
 543           load_store = new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
 544         }
 545         break;
 546       }
 547       case T_SHORT: {
 548         if (is_weak_cas) {
 549           load_store = new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
 550         } else {
 551           load_store = new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
 552         }
 553         break;
 554       }
 555       case T_INT: {
 556         if (is_weak_cas) {
 557           load_store = new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
 558         } else {
 559           load_store = new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
 560         }
 561         break;
 562       }
 563       case T_LONG: {
 564         if (is_weak_cas) {
 565           load_store = new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
 566         } else {
 567           load_store = new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
 568         }
 569         break;
 570       }
 571       default:
 572         ShouldNotReachHere();
 573     }
 574   }
 575 
 576   load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
 577   load_store = kit->gvn().transform(load_store);
 578 
 579   access.set_raw_access(load_store);
 580   pin_atomic_op(access);
 581 
 582   return load_store;
 583 }
 584 
 585 Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
 586   GraphKit* kit = access.kit();
 587   Node* mem = access.memory();
 588   Node* adr = access.addr().node();
 589   const TypePtr* adr_type = access.addr().type();
 590   Node* load_store = nullptr;
 591 
 592   if (access.is_oop()) {
 593 #ifdef _LP64
 594     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 595       Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
 596       load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
 597     } else
 598 #endif
 599     {
 600       load_store = new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr());
 601     }
  } else {
 603     switch (access.type()) {
 604       case T_BYTE:
 605         load_store = new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type);
 606         break;
 607       case T_SHORT:
 608         load_store = new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type);
 609         break;
 610       case T_INT:
 611         load_store = new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type);
 612         break;
 613       case T_LONG:
 614         load_store = new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type);
 615         break;
 616       default:
 617         ShouldNotReachHere();
 618     }
 619   }
 620 
 621   load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
 622   load_store = kit->gvn().transform(load_store);
 623 
 624   access.set_raw_access(load_store);
 625   pin_atomic_op(access);
 626 
 627 #ifdef _LP64
 628   if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
 629     return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
 630   }
 631 #endif
 632 
 633   return load_store;
 634 }
 635 
 636 Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
 637   Node* load_store = nullptr;
 638   GraphKit* kit = access.kit();
 639   Node* adr = access.addr().node();
 640   const TypePtr* adr_type = access.addr().type();
 641   Node* mem = access.memory();
 642 
 643   switch(access.type()) {
 644     case T_BYTE:
 645       load_store = new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type);
 646       break;
 647     case T_SHORT:
 648       load_store = new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type);
 649       break;
 650     case T_INT:
 651       load_store = new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type);
 652       break;
 653     case T_LONG:
 654       load_store = new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type);
 655       break;
 656     default:
 657       ShouldNotReachHere();
 658   }
 659 
 660   load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
 661   load_store = kit->gvn().transform(load_store);
 662 
 663   access.set_raw_access(load_store);
 664   pin_atomic_op(access);
 665 
 666   return load_store;
 667 }
 668 
 669 Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
 670                                           Node* new_val, const Type* value_type) const {
 671   C2AccessFence fence(access);
 672   resolve_address(access);
 673   return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
 674 }
 675 
 676 Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
 677                                            Node* new_val, const Type* value_type) const {
 678   C2AccessFence fence(access);
 679   resolve_address(access);
 680   return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
 681 }
 682 
 683 Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
 684   C2AccessFence fence(access);
 685   resolve_address(access);
 686   return atomic_xchg_at_resolved(access, new_val, value_type);
 687 }
 688 
 689 Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
 690   C2AccessFence fence(access);
 691   resolve_address(access);
 692   return atomic_add_at_resolved(access, new_val, value_type);
 693 }
 694 
 695 int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) {
  // Exclude the header but include the array length to copy by 8-byte words.
 697   // Can't use base_offset_in_bytes(bt) since basic type is unknown.
 698   int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
 699                             instanceOopDesc::base_offset_in_bytes();
 700   // base_off:
 701   // 8  - 32-bit VM or 64-bit VM, compact headers
 702   // 12 - 64-bit VM, compressed klass
 703   // 16 - 64-bit VM, normal klass
 704   if (base_off % BytesPerLong != 0) {
 705     assert(!UseCompactObjectHeaders, "");
 706     if (is_array) {
      // Exclude the length to copy by 8-byte words.
 708       base_off += sizeof(int);
 709     } else {
      // Include the klass to copy by 8-byte words.
 711       base_off = instanceOopDesc::klass_offset_in_bytes();
 712     }
 713     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
 714   }
 715   return base_off;
 716 }
 717 
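// Expand a clone of an object or array into a raw copy of its payload
// (everything from the payload base offset onwards), measured in 8-byte words
// and modeled as an ArrayCopyNode.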
 718 void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
 719   int base_off = arraycopy_payload_base_offset(is_array);
 720   Node* payload_size = size;
 721   Node* offset = kit->MakeConX(base_off);
 722   payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
 723   if (is_array) {
 724     // Ensure the array payload size is rounded up to the next BytesPerLong
 725     // multiple when converting to double-words. This is necessary because array
 726     // size does not include object alignment padding, so it might not be a
 727     // multiple of BytesPerLong for sub-long element types.
 728     payload_size = kit->gvn().transform(new AddXNode(payload_size, kit->MakeConX(BytesPerLong - 1)));
 729   }
 730   payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
 731   ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
 732   if (is_array) {
 733     ac->set_clone_array();
 734   } else {
 735     ac->set_clone_inst();
 736   }
 737   Node* n = kit->gvn().transform(ac);
 738   if (n == ac) {
 739     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 740     ac->set_adr_type(TypeRawPtr::BOTTOM);
 741     kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
 742   } else {
 743     kit->set_all_memory(n);
 744   }
 745 }
 746 
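// Emit the TLAB fast-path allocation: load the TLAB top and end, bump the top
// by size_in_bytes, and branch to needgc_ctrl if the new top would exceed the
// end. On the fast path the new top is stored back and the old top is returned
// as the address of the newly allocated object.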
 747 Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
 748                                  Node*& i_o, Node*& needgc_ctrl,
 749                                  Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
 750                                  intx prefetch_lines) const {
 751   assert(UseTLAB, "Only for TLAB enabled allocations");
 752 
 753   Node* thread = macro->transform_later(new ThreadLocalNode());
 754   Node* tlab_top_adr = macro->off_heap_plus_addr(thread, in_bytes(JavaThread::tlab_top_offset()));
 755   Node* tlab_end_adr = macro->off_heap_plus_addr(thread, in_bytes(JavaThread::tlab_end_offset()));
 756 
 757   // Load TLAB end.
 758   //
  // Note: We set the control input on "tlab_end" and "old_tlab_top" to work around
  //       a bug where these values were being moved across a safepoint.  These are
  //       not oops, so they cannot be included in the oop map, but they can be
  //       changed by a GC.  The proper way to fix this would be to set the raw
  //       memory state when generating a SafepointNode.  However, this will require
  //       extensive changes to the loop optimization in order to prevent a
  //       degradation of the optimization.
  //       See comment in memnode.hpp, around line 227 in class LoadPNode.
 767   Node* tlab_end = macro->make_load_raw(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
 768 
 769   // Load the TLAB top.
 770   Node* old_tlab_top = new LoadPNode(toobig_false, mem, tlab_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
 771   macro->transform_later(old_tlab_top);
 772 
  // Add the allocation size to the old TLAB top to get the new TLAB top.
 774   Node* new_tlab_top = AddPNode::make_off_heap(old_tlab_top, size_in_bytes);
 775   macro->transform_later(new_tlab_top);
 776 
 777   // Check against TLAB end
 778   Node* tlab_full = new CmpPNode(new_tlab_top, tlab_end);
 779   macro->transform_later(tlab_full);
 780 
 781   Node* needgc_bol = new BoolNode(tlab_full, BoolTest::ge);
 782   macro->transform_later(needgc_bol);
 783   IfNode* needgc_iff = new IfNode(toobig_false, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
 784   macro->transform_later(needgc_iff);
 785 
 786   // Plug the failing-heap-space-need-gc test into the slow-path region
 787   Node* needgc_true = new IfTrueNode(needgc_iff);
 788   macro->transform_later(needgc_true);
 789   needgc_ctrl = needgc_true;
 790 
 791   // No need for a GC.
 792   Node* needgc_false = new IfFalseNode(needgc_iff);
 793   macro->transform_later(needgc_false);
 794 
 795   // Fast path:
 796   i_o = macro->prefetch_allocation(i_o, needgc_false, mem,
 797                                    old_tlab_top, new_tlab_top, prefetch_lines);
 798 
 799   // Store the modified TLAB top back down.
 800   Node* store_tlab_top = new StorePNode(needgc_false, mem, tlab_top_adr,
 801                    TypeRawPtr::BOTTOM, new_tlab_top, MemNode::unordered);
 802   macro->transform_later(store_tlab_top);
 803 
 804   fast_oop_ctrl = needgc_false;
 805   fast_oop_rawmem = store_tlab_top;
 806   return old_tlab_top;
 807 }
 808 
 809 const TypeFunc* BarrierSetC2::_clone_type_Type = nullptr;
 810 
 811 void BarrierSetC2::make_clone_type() {
 812   assert(BarrierSetC2::_clone_type_Type == nullptr, "should be");
 813   // Create input type (domain)
 814   int argcnt = NOT_LP64(3) LP64_ONLY(4);
 815   const Type** const domain_fields = TypeTuple::fields(argcnt);
 816   int argp = TypeFunc::Parms;
 817   domain_fields[argp++] = TypeInstPtr::NOTNULL;  // src
 818   domain_fields[argp++] = TypeInstPtr::NOTNULL;  // dst
 819   domain_fields[argp++] = TypeX_X;               // size lower
 820   LP64_ONLY(domain_fields[argp++] = Type::HALF); // size upper
 821   assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
 822   const TypeTuple* const domain = TypeTuple::make(TypeFunc::Parms + argcnt, domain_fields);
 823 
 824   // Create result type (range)
 825   const Type** const range_fields = TypeTuple::fields(0);
 826   const TypeTuple* const range = TypeTuple::make(TypeFunc::Parms + 0, range_fields);
 827 
 828   BarrierSetC2::_clone_type_Type = TypeFunc::make(domain, range);
 829 }
 830 
 831 inline const TypeFunc* BarrierSetC2::clone_type() {
 832   assert(BarrierSetC2::_clone_type_Type != nullptr, "should be initialized");
 833   return BarrierSetC2::_clone_type_Type;
 834 }
 835 
 836 #define XTOP LP64_ONLY(COMMA phase->top())
 837 
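// Expand a clone ArrayCopyNode into a leaf call to the given clone runtime
// entry, passing the source, destination, and object size in heap words.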
 838 void BarrierSetC2::clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
 839                                     address clone_addr, const char* clone_name) const {
 840   Node* const ctrl = ac->in(TypeFunc::Control);
 841   Node* const mem  = ac->in(TypeFunc::Memory);
 842   Node* const src  = ac->in(ArrayCopyNode::Src);
 843   Node* const dst  = ac->in(ArrayCopyNode::Dest);
 844   Node* const size = ac->in(ArrayCopyNode::Length);
 845 
 846   assert(size->bottom_type()->base() == Type_X,
 847          "Should be of object size type (int for 32 bits, long for 64 bits)");
 848 
 849   // The native clone we are calling here expects the object size in words.
 850   // Add header/offset size to payload size to get object size.
 851   Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
 852   Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
  // HeapAccess<>::clone expects the size in heap words.
  // On 64-bit platforms this is a no-op.
  // On 32-bit platforms we need to multiply full_size by HeapWordsPerLong (2).
 856   Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
 857 
 858   Node* const call = phase->make_leaf_call(ctrl,
 859                                            mem,
 860                                            clone_type(),
 861                                            clone_addr,
 862                                            clone_name,
 863                                            TypeRawPtr::BOTTOM,
 864                                            src, dst, full_size_in_heap_words XTOP);
 865   phase->transform_later(call);
 866   phase->igvn().replace_node(ac, call);
 867 }
 868 
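// Default expansion of a clone ArrayCopyNode: copy the payload with a raw,
// long-word arraycopy leaf call.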
 869 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
 870   Node* ctrl = ac->in(TypeFunc::Control);
 871   Node* mem = ac->in(TypeFunc::Memory);
 872   Node* src = ac->in(ArrayCopyNode::Src);
 873   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
 874   Node* dest = ac->in(ArrayCopyNode::Dest);
 875   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
 876   Node* length = ac->in(ArrayCopyNode::Length);
 877 
 878   Node* payload_src = phase->basic_plus_adr(src, src_offset);
 879   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
 880 
 881   const char* copyfunc_name = "arraycopy";
 882   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);
 883 
 884   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 885   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
 886 
 887   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
 888   phase->transform_later(call);
 889 
 890   phase->igvn().replace_node(ac, call);
 891 }
 892 
 893 #undef XTOP
 894 
 895 static bool block_has_safepoint(const Block* block, uint from, uint to) {
 896   for (uint i = from; i < to; i++) {
 897     if (block->get_node(i)->is_MachSafePoint()) {
 898       // Safepoint found
 899       return true;
 900     }
 901   }
 902 
 903   // Safepoint not found
 904   return false;
 905 }
 906 
 907 static bool block_has_safepoint(const Block* block) {
 908   return block_has_safepoint(block, 0, block->number_of_nodes());
 909 }
 910 
 911 static uint block_index(const Block* block, const Node* node) {
 912   for (uint j = 0; j < block->number_of_nodes(); ++j) {
 913     if (block->get_node(j) == node) {
 914       return j;
 915     }
 916   }
 917   ShouldNotReachHere();
 918   return 0;
 919 }
 920 
 921 // Look through various node aliases
 922 static const Node* look_through_node(const Node* node) {
 923   while (node != nullptr) {
 924     const Node* new_node = node;
 925     if (node->is_Mach()) {
 926       const MachNode* const node_mach = node->as_Mach();
 927       if (node_mach->ideal_Opcode() == Op_CheckCastPP) {
 928         new_node = node->in(1);
 929       }
 930       if (node_mach->is_SpillCopy()) {
 931         new_node = node->in(1);
 932       }
 933     }
 934     if (new_node == node || new_node == nullptr) {
 935       break;
 936     } else {
 937       node = new_node;
 938     }
 939   }
 940 
 941   return node;
 942 }
 943 
 944 // Whether the given offset is undefined.
 945 static bool is_undefined(intptr_t offset) {
 946   return offset == Type::OffsetTop;
 947 }
 948 
 949 // Whether the given offset is unknown.
 950 static bool is_unknown(intptr_t offset) {
 951   return offset == Type::OffsetBot;
 952 }
 953 
 954 // Whether the given offset is concrete (defined and compile-time known).
 955 static bool is_concrete(intptr_t offset) {
 956   return !is_undefined(offset) && !is_unknown(offset);
 957 }
 958 
 959 // Compute base + offset components of the memory address accessed by mach.
 960 // Return a node representing the base address, or null if the base cannot be
 961 // found or the offset is undefined or a concrete negative value. If a non-null
 962 // base is returned, the offset is a concrete, nonnegative value or unknown.
 963 static const Node* get_base_and_offset(const MachNode* mach, intptr_t& offset) {
 964   const TypePtr* adr_type = nullptr;
 965   offset = 0;
 966   const Node* base = mach->get_base_and_disp(offset, adr_type);
 967 
 968   if (base == nullptr || base == NodeSentinel) {
 969     return nullptr;
 970   }
 971 
 972   if (offset == 0 && base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_AddP) {
 973     // The memory address is computed by 'base' and fed to 'mach' via an
 974     // indirect memory operand (indicated by offset == 0). The ultimate base and
 975     // offset can be fetched directly from the inputs and Ideal type of 'base'.
 976     const TypeOopPtr* oopptr = base->bottom_type()->isa_oopptr();
 977     if (oopptr == nullptr) return nullptr;
 978     offset = oopptr->offset();
 979     // Even if 'base' is not an Ideal AddP node anymore, Matcher::ReduceInst()
 980     // guarantees that the base address is still available at the same slot.
 981     base = base->in(AddPNode::Base);
 982     assert(base != nullptr, "");
 983   }
 984 
 985   if (is_undefined(offset) || (is_concrete(offset) && offset < 0)) {
 986     return nullptr;
 987   }
 988 
 989   return look_through_node(base);
 990 }
 991 
 992 // Whether a phi node corresponds to an array allocation.
 993 // This test is incomplete: in some edge cases, it might return false even
 994 // though the node does correspond to an array allocation.
 995 static bool is_array_allocation(const Node* phi) {
 996   precond(phi->is_Phi());
 997   // Check whether phi has a successor cast (CheckCastPP) to Java array pointer,
 998   // possibly below spill copies and other cast nodes. Limit the exploration to
 999   // a single path from the phi node consisting of these node types.
1000   const Node* current = phi;
1001   while (true) {
1002     const Node* next = nullptr;
1003     for (DUIterator_Fast imax, i = current->fast_outs(imax); i < imax; i++) {
1004       if (!current->fast_out(i)->isa_Mach()) {
1005         continue;
1006       }
1007       const MachNode* succ = current->fast_out(i)->as_Mach();
1008       if (succ->ideal_Opcode() == Op_CheckCastPP) {
1009         if (succ->get_ptr_type()->isa_aryptr()) {
1010           // Cast to Java array pointer: phi corresponds to an array allocation.
1011           return true;
1012         }
1013         // Other cast: record as candidate for further exploration.
1014         next = succ;
1015       } else if (succ->is_SpillCopy() && next == nullptr) {
1016         // Spill copy, and no better candidate found: record as candidate.
1017         next = succ;
1018       }
1019     }
1020     if (next == nullptr) {
1021       // No evidence found that phi corresponds to an array allocation, and no
1022       // candidates available to continue exploring.
1023       return false;
1024     }
1025     // Continue exploring from the best candidate found.
1026     current = next;
1027   }
1028   ShouldNotReachHere();
1029 }
1030 
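// Recognize the result phi of an expanded TLAB allocation: a two-input phi
// whose fast-path input is a load of the thread-local TLAB top (see
// obj_allocate above).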
1031 bool BarrierSetC2::is_allocation(const Node* node) {
1032   assert(node->is_Phi(), "expected phi node");
1033   if (node->req() != 3) {
1034     return false;
1035   }
1036   const Node* const fast_node = node->in(2);
1037   if (!fast_node->is_Mach()) {
1038     return false;
1039   }
1040   const MachNode* const fast_mach = fast_node->as_Mach();
1041   if (fast_mach->ideal_Opcode() != Op_LoadP) {
1042     return false;
1043   }
1044   intptr_t offset;
1045   const Node* const base = get_base_and_offset(fast_mach, offset);
1046   if (base == nullptr || !base->is_Mach() || !is_concrete(offset)) {
1047     return false;
1048   }
1049   const MachNode* const base_mach = base->as_Mach();
1050   if (base_mach->ideal_Opcode() != Op_ThreadLocal) {
1051     return false;
1052   }
1053   return offset == in_bytes(Thread::tlab_top_offset());
1054 }
1055 
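// Elide the barrier of every access in 'accesses' that is dominated, with no
// safepoint in between, by a node in 'access_dominators': either an allocation
// of the same object or an access to the same address.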
1056 void BarrierSetC2::elide_dominated_barriers(Node_List& accesses, Node_List& access_dominators) const {
1057   Compile* const C = Compile::current();
1058   PhaseCFG* const cfg = C->cfg();
1059 
1060   for (uint i = 0; i < accesses.size(); i++) {
1061     MachNode* const access = accesses.at(i)->as_Mach();
1062     intptr_t access_offset;
1063     const Node* const access_obj = get_base_and_offset(access, access_offset);
1064     Block* const access_block = cfg->get_block_for_node(access);
1065     const uint access_index = block_index(access_block, access);
1066 
1067     if (access_obj == nullptr) {
1068       // No information available
1069       continue;
1070     }
1071 
1072     for (uint j = 0; j < access_dominators.size(); j++) {
      const Node* const mem = access_dominators.at(j);
1074       if (mem->is_Phi()) {
1075         assert(is_allocation(mem), "expected allocation phi node");
1076         if (mem != access_obj) {
1077           continue;
1078         }
1079         if (is_unknown(access_offset) && !is_array_allocation(mem)) {
1080           // The accessed address has an unknown offset, but the allocated
1081           // object cannot be determined to be an array. Avoid eliding in this
1082           // case, to be on the safe side.
1083           continue;
1084         }
1085         assert((is_concrete(access_offset) && access_offset >= 0) || (is_unknown(access_offset) && is_array_allocation(mem)),
1086                "candidate allocation-dominated access offsets must be either concrete and nonnegative, or unknown (for array allocations only)");
1087       } else {
1088         // Access node
1089         const MachNode* const mem_mach = mem->as_Mach();
1090         intptr_t mem_offset;
1091         const Node* const mem_obj = get_base_and_offset(mem_mach, mem_offset);
1092 
1093         if (mem_obj == nullptr ||
1094             !is_concrete(access_offset) ||
1095             !is_concrete(mem_offset)) {
1096           // No information available
1097           continue;
1098         }
1099 
1100         if (mem_obj != access_obj || mem_offset != access_offset) {
1101           // Not the same addresses, not a candidate
1102           continue;
1103         }
1104         assert(is_concrete(access_offset) && access_offset >= 0,
1105                "candidate non-allocation-dominated access offsets must be concrete and nonnegative");
1106       }
1107 
1108       Block* mem_block = cfg->get_block_for_node(mem);
1109       const uint mem_index = block_index(mem_block, mem);
1110 
1111       if (access_block == mem_block) {
1112         // Earlier accesses in the same block
1113         if (mem_index < access_index && !block_has_safepoint(mem_block, mem_index + 1, access_index)) {
1114           elide_dominated_barrier(access);
1115         }
1116       } else if (mem_block->dominates(access_block)) {
1117         // Dominating block? Look around for safepoints
1118         ResourceMark rm;
1119         Block_List stack;
1120         VectorSet visited;
1121         stack.push(access_block);
1122         bool safepoint_found = block_has_safepoint(access_block);
1123         while (!safepoint_found && stack.size() > 0) {
1124           const Block* const block = stack.pop();
1125           if (visited.test_set(block->_pre_order)) {
1126             continue;
1127           }
1128           if (block_has_safepoint(block)) {
1129             safepoint_found = true;
1130             break;
1131           }
1132           if (block == mem_block) {
1133             continue;
1134           }
1135 
1136           // Push predecessor blocks
1137           for (uint p = 1; p < block->num_preds(); ++p) {
1138             Block* const pred = cfg->get_block_for_node(block->pred(p));
1139             stack.push(pred);
1140           }
1141         }
1142 
1143         if (!safepoint_found) {
1144           elide_dominated_barrier(access);
1145         }
1146       }
1147     }
1148   }
1149 }
1150 
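// Compute register liveness at barrier stubs using a backwards dataflow
// analysis over the CFG, recording for each node tracked by the barrier set
// state either its live-out or its live-in set (see needs_livein_data()).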
1151 void BarrierSetC2::compute_liveness_at_stubs() const {
1152   ResourceMark rm;
1153   Compile* const C = Compile::current();
1154   Arena* const A = Thread::current()->resource_area();
1155   PhaseCFG* const cfg = C->cfg();
1156   PhaseRegAlloc* const regalloc = C->regalloc();
1157   RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
1158   BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
1159   BarrierSetC2State* bs_state = barrier_set_state();
1160   Block_List worklist;
1161 
1162   for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
1163     new ((void*)(live + i)) RegMask();
1164     worklist.push(cfg->get_block(i));
1165   }
1166 
1167   while (worklist.size() > 0) {
1168     const Block* const block = worklist.pop();
1169     RegMask& old_live = live[block->_pre_order];
1170     RegMask new_live;
1171 
1172     // Initialize to union of successors
1173     for (uint i = 0; i < block->_num_succs; i++) {
1174       const uint succ_id = block->_succs[i]->_pre_order;
1175       new_live.or_with(live[succ_id]);
1176     }
1177 
1178     // Walk block backwards, computing liveness
1179     for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
1180       const Node* const node = block->get_node(i);
1181 
1182       // If this node tracks out-liveness, update it
1183       if (!bs_state->needs_livein_data()) {
1184         RegMask* const regs = bs_state->live(node);
1185         if (regs != nullptr) {
1186           regs->or_with(new_live);
1187         }
1188       }
1189 
1190       // Remove def bits
1191       const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
1192       const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
1193       if (first != OptoReg::Bad) {
1194         new_live.remove(first);
1195       }
1196       if (second != OptoReg::Bad) {
1197         new_live.remove(second);
1198       }
1199 
1200       // Add use bits
1201       for (uint j = 1; j < node->req(); ++j) {
1202         const Node* const use = node->in(j);
1203         const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
1204         const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
1205         if (first != OptoReg::Bad) {
1206           new_live.insert(first);
1207         }
1208         if (second != OptoReg::Bad) {
1209           new_live.insert(second);
1210         }
1211       }
1212 
1213       // If this node tracks in-liveness, update it
1214       if (bs_state->needs_livein_data()) {
1215         RegMask* const regs = bs_state->live(node);
1216         if (regs != nullptr) {
1217           regs->or_with(new_live);
1218         }
1219       }
1220     }
1221 
1222     // Now at block top, see if we have any changes
1223     new_live.subtract(old_live);
1224     if (!new_live.is_empty()) {
      // Liveness has been refined; update it and propagate to predecessor blocks.
1226       old_live.or_with(new_live);
1227       for (uint i = 1; i < block->num_preds(); ++i) {
1228         Block* const pred = cfg->get_block_for_node(block->pred(i));
1229         worklist.push(pred);
1230       }
1231     }
1232   }
1233 }