/*
 * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  if (_load_reference_barriers->contains(n)) {
    _load_reference_barriers->remove(n);
  }
}

#define __ kit->

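// Decide whether the SATB pre-barrier can be elided for a store, mirroring the
// ReduceInitialCardMarks-style reasoning used by other collectors: walk back
// through the memory slice and prove that the field being overwritten belongs
// to a freshly allocated object and still holds its initial null value (no
// captured store in the InitializeNode, no aliasing store in between). If so,
// the "previous value" the barrier would log is known to be null and nothing
// needs to be recorded.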
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseValues* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing null, so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

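// The emitted pre-barrier has, in pseudo-code, roughly the following shape
// (a sketch of the IR built below, not literal runtime code):
//
//   if ((thread->gc_state & MARKING) != 0) {
//     pre_val = *adr;                                  // only if do_load
//     if (pre_val != nullptr) {
//       if (thread->satb_index != 0) {
//         // fast path: log pre_val into the thread-local SATB buffer
//         thread->satb_index -= sizeof(intptr_t);
//         *(thread->satb_buffer + thread->satb_index) = pre_val;
//       } else {
//         // slow path: buffer is full, hand off to the runtime
//         ShenandoahRuntime::write_ref_field_pre(pre_val, thread);
//       }
//     }
//   }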
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(adr != nullptr, "where are we loading from?");
    assert(pre_val == nullptr, "loaded already?");
    assert(val_type != nullptr, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != nullptr, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (marking != 0)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != nullptr)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = ShenandoahBarrierSetC2::write_ref_field_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), "shenandoah_wb_pre",
                          pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != nullptr)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != nullptr) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre);
}

bool ShenandoahBarrierSetC2::is_shenandoah_clone_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::clone_barrier);
}

bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
  if (!call->is_CallLeaf()) {
    return false;
  }

  address entry_point = call->as_CallLeaf()->entry_point();
  return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow));
}

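// Recognize the "is marking active?" test emitted by the pre-barrier above:
//   If (Bool ne (CmpI (AndI (LoadB gc_state) ConI(MARKING)) ConI(0)))
// where the gc_state load is a LoadB from (ThreadLocal + gc_state_offset).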
bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseValues* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when Shenandoah
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile-time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // It also adds a memory barrier for non-volatile loads from the referent field,
  // to prevent commoning of loads across safepoints.

  // Some compile-time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset()?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != nullptr && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset, but not the referent offset, so just return.
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != nullptr) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != nullptr) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->instance_klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile-time filters did not reject base_oop/offset, so
  // we need to generate the following runtime filters:
  //
  // if (offset == java_lang_ref_Reference::referent_offset()) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update GraphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from GraphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == nullptr
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update GraphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        satb_write_barrier_pre(kit, false /* do_load */,
                               nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                               pre_val /* pre_val */,
                               T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from GraphKit.
        __ sync_kit(kit);

      } __ end_if(); // is_instof == 1
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

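// Call signatures for the barrier runtime entries. The domains mirror the
// arguments wired up at the call sites above: (pre_val, thread) for the SATB
// pre-barrier, (src oop) for the clone barrier, and (oop, load address) -> oop
// for the load reference barrier.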
const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() {
  const Type** fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address

  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

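// Oop stores need the SATB pre-barrier: the value about to be overwritten is
// loaded and logged so that concurrent marking stays snapshot-consistent.
// Stores decorated with AS_NO_KEEPALIVE deliberately skip this logging.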
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;

  if (!access.is_oop()) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (no_keepalive) {
    // No keep-alive means no need for the pre-barrier.
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();

    uint adr_idx = kit->C->get_alias_index(adr_type);
    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
    shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                                 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    return BarrierSetC2::load_at_resolved(access, val_type);
  }

  Node* load = BarrierSetC2::load_at_resolved(access, val_type);
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
    if (access.is_parse_access()) {
      load = static_cast<C2ParseAccess&>(access).kit()->gvn().transform(load);
    } else {
      load = static_cast<C2OptAccess&>(access).gvn().transform(load);
    }
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    Node* top = Compile::current()->top();
    Node* adr = access.addr().node();
    Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
    Node* obj = access.base();

    bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;

    // If we are reading the value of the referent field of a Reference
    // object (either by using Unsafe directly or through reflection)
    // then, if SATB is enabled, we need to record the referent in an
    // SATB log buffer using the pre-barrier mechanism.
    // We also need to add a memory barrier to prevent commoning reads
    // from this field across safepoints, since GC can change its value.
    if (!on_weak_ref || (unknown && (offset == top || obj == top)) || !keep_alive) {
      return load;
    }

    assert(access.is_parse_access(), "entry not supported at optimization time");
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
    bool is_unordered = (decorators & MO_UNORDERED) != 0;
    bool in_native = (decorators & IN_NATIVE) != 0;
    bool need_cpu_mem_bar = !is_unordered || mismatched || in_native;

    if (on_weak_ref) {
      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                             load /* pre_val */, T_OBJECT);
      // Add memory barrier to prevent commoning reads from this field
      // across safepoint since GC can change its value.
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    } else if (unknown) {
      // We do not require a mem bar inside pre_barrier if need_mem_bar
      // is set: the barriers would be emitted by us.
      insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
    }
  }

  return load;
}

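// For atomic updates of oops the pre-barrier does not need to re-load the
// field: a successful CAS can only overwrite expected_val, so expected_val is
// passed directly as pre_val. The oop returned by the value-returning
// variants is still a load from the heap and therefore gets an LRB.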
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                   Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = nullptr;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = nullptr;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
  return is_shenandoah_wb_pre_call(node);
}

bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  return (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) ||
         is_shenandoah_lrb_call(node) ||
         is_shenandoah_wb_pre_call(node) ||
         is_shenandoah_clone_call(node);
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c == nullptr) {
    return c;
  }
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = is_reference_type(type);
  if (!is_oop) {
    return false;
  }
  if (ShenandoahSATBBarrier && tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  return true;
}

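// A clone only needs the Shenandoah clone barrier if the copied object can
// contain oop fields. For exact (or provably leaf) instance klasses without
// injected fields this can be decided statically; otherwise we must
// conservatively assume the barrier is needed.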
bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
  const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != nullptr) {
    ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
    if ((src_type->klass_is_exact() || !ik->has_subklass()) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          Compile::current()->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

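// Expanded shape of the guarded clone, in pseudo-code (a sketch of the IR
// wired up below):
//
//   if ((thread->gc_state & HAS_FORWARDED) != 0) {
//     ShenandoahRuntime::clone_barrier(src_base);  // fix up source references first
//   }
//   fast_arraycopy(src, dest, length);             // then do the raw copy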
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src_base = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest_base = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* src = phase->basic_plus_adr(src_base, src_offset);
  Node* dest = phase->basic_plus_adr(dest_base, dest_offset);

  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
    // Check if the heap has forwarded objects. If it does, we need to call into the special
    // routine that would fix up source references before we can continue.

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* thread = phase->transform_later(new ThreadLocalNode());
    Node* offset = phase->MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

    uint gc_state_idx = Compile::AliasIdxRaw;
    const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
    debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

    Node* gc_state    = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
    Node* stable_and  = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)));
    Node* stable_cmp  = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));

    IfNode* stable_iff  = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
    Node* stable_ctrl   = phase->transform_later(new IfFalseNode(stable_iff));
    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));

    // Heap is stable, no need to do anything additional
    region->init_req(_heap_stable, stable_ctrl);
    mem_phi->init_req(_heap_stable, mem);

    // Heap is unstable, call into clone barrier stub
    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
                                       ShenandoahBarrierSetC2::clone_barrier_Type(),
                                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::clone_barrier),
                                       "shenandoah_clone",
                                       TypeRawPtr::BOTTOM,
                                       src_base);
    call = phase->transform_later(call);

    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
    region->init_req(_heap_unstable, ctrl);
    mem_phi->init_req(_heap_unstable, mem);

    // Wire up the actual arraycopy stub now
    ctrl = phase->transform_later(region);
    mem = phase->transform_later(mem_phi);

    const char* name = "arraycopy";
    call = phase->make_leaf_call(ctrl, mem,
                                 OptoRuntime::fast_arraycopy_Type(),
                                 phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, name, true),
                                 name, TypeRawPtr::BOTTOM,
                                 src, dest, length
                                 LP64_ONLY(COMMA phase->top()));
    call = phase->transform_later(call);

    // Hook up the whole thing into the graph
    phase->replace_node(ac, call);
  } else {
    BarrierSetC2::clone_at_expansion(phase, ac);
  }
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, igvn);
  }
}

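// To eliminate a pre-barrier (e.g. when the stored-to allocation does not
// escape), walk from the runtime call's control projection up to the
// enclosing marking test and constant-fold that test to false; the now-dead
// barrier path, including the call itself, is then cleaned up by IGVN.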
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    igvn->add_users_to_worklist(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List& useful, Compile* C) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        C->record_for_igvn(n->fast_out(i));
      }
    }
  }

  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeMacroExpand) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  } else if (phase == BarrierSetC2::BeforeCodeGen) {
    // Verify Shenandoah pre-barriers
    const int gc_state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());

    Unique_Node_List visited;
    Node_List worklist;
    // We're going to walk control flow backwards starting from the Root
    worklist.push(compile->root());
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == nullptr || x == compile->top()) {
        continue;
      }

      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(gc_state_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See GraphKit::g1_write_barrier_pre()
        if (x->is_If()) {
          IfNode* iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode* cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode* load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == gc_state_offset) {

                Node* if_ctrl = iff->in(0);
                Node* load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && if_ctrl->in(0)->is_NeverBranch()) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}
#endif

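// Ideal transformations for barrier-related nodes:
//  - drop the extra address input from a wb-pre call once that address
//    computation has no other users (the address is kept on the call so that
//    barriers belonging to non-escaping allocations can be found and
//    eliminated);
//  - step over strong LRBs when comparing against null, since a strong LRB
//    never turns a non-null oop into null or vice versa;
//  - fold a heap-stable test that is dominated by an identical test.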
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_Type()->domain_sig()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);

    // If one input is null, then step over the strong LRB barriers on the other input
    if (in1->bottom_type() == TypePtr::NULL_PTR &&
        !((in2->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
          !ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in2)->decorators()))) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR &&
        !((in1->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
          !ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in1)->decorators()))) {
      in1 = step_over_gc_barrier(in1);
    }

    if (in1 != n->in(1)) {
      n->set_req_X(1, in1, phase);
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      n->set_req_X(2, in2, phase);
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != nullptr &&
             n->outcnt() == 2) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op    ||  // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) ||  // Not same input 1?
           prev_dom->in(0) != dom) {  // One path of test does not dominate?
      if (dist < 0) return nullptr;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (dom == nullptr) return nullptr;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return nullptr;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN(), false);
  }

  return nullptr;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode, Unique_Node_List& dead_nodes) const {
  switch (opcode) {
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert(n->is_Call(), "");
      CallNode* call = n->as_Call();
      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_Type()->domain_sig()->cnt();
        if (call->req() > cnt) {
          assert(call->req() == cnt + 1, "only one extra input");
          Node* addp = call->in(cnt);
          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
          call->del_req(cnt);
        }
      }
      return false;
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      assert(false, "should have been expanded already");
      return true;
    default:
      return false;
  }
}

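// Escape analysis support: the Shenandoah CAS and LRB nodes must feed the
// connection graph the same way their standard counterparts do, while stores
// into the thread-local SATB buffer are deliberately ignored so that
// non-escaping allocations can still be scalar-replaced.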
bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
      conn_graph->add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      return true;
    case Op_StoreP: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = gvn->type(adr);
      // Pointer stores in Shenandoah barriers look like unsafe accesses.
      // Ignore such stores to be able to scalar-replace non-escaping
      // allocations.
      if (adr_type->isa_rawptr() && adr->is_AddP()) {
        Node* base = conn_graph->get_addp_base(adr);
        if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
          adr = base->in(MemNode::Address);
          Node* tls = conn_graph->get_addp_base(adr);
          if (tls->Opcode() == Op_ThreadLocal) {
            int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
            const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
            if (offs == buf_offset) {
              return true; // Pre-barrier previous oop value store.
            }
          }
        }
      }
      return false;
    }
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN: {
      Node* adr = n->in(MemNode::Address);
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, nullptr);
      // fallthrough
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
      return conn_graph->add_final_edges_unsafe_access(n, opcode);
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), nullptr);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
  return n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
         n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN);
}

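// Matcher support: like the standard CompareAndSwap nodes, the Shenandoah CAS
// variants carry (oldval, newval) as two separate inputs and must be folded
// into a BinaryNode pair before matching.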
bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN: {   // Convert trinary to binary-tree
      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair = new BinaryNode(oldval, newval);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    default:
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) const {
  return xop == Op_ShenandoahCompareAndExchangeP ||
         xop == Op_ShenandoahCompareAndExchangeN ||
         xop == Op_ShenandoahWeakCompareAndSwapP ||
         xop == Op_ShenandoahWeakCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapP;
}