/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}

int ShenandoahBarrierSetC2State::iu_barriers_count() const {
  return _iu_barriers->length();
}

ShenandoahIUBarrierNode* ShenandoahBarrierSetC2State::iu_barrier(int idx) const {
  return _iu_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_iu_barrier(ShenandoahIUBarrierNode* n) {
  assert(!_iu_barriers->contains(n), "duplicate entry in barrier list");
  _iu_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_iu_barrier(ShenandoahIUBarrierNode* n) {
  _iu_barriers->remove_if_existing(n);
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  _load_reference_barriers->remove_if_existing(n);
}

Node* ShenandoahBarrierSetC2::shenandoah_iu_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahIUBarrier) {
    return kit->gvn().transform(new ShenandoahIUBarrierNode(obj));
  }
  return obj;
}

#define __ kit->

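// Decide at compile time whether the SATB pre-barrier for a store of type bt
// at adr can be elided. That is only legal when the previous value is
// provably null, i.e. the store targets a freshly allocated object whose
// initialization has not captured an oop store at the same offset. A sketch
// of the eliminable shape (hypothetical Java source):
//
//   Foo f = new Foo();  // allocation is zero-initialized
//   f.field = x;        // the pre-value is provably null; logging null is
//                       // a no-op, so the pre-barrier can be removed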
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing null, so that no previous
        // store has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

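// Emit the SATB pre-barrier. A rough sketch of the runtime behavior of the
// IR built below (pseudocode only; index and buffer stand for the SATB queue
// fields reached through the ShenandoahThreadLocalData offsets):
//
//   if (thread->gc_state & (YOUNG_MARKING | OLD_MARKING)) {
//     oop pre_val = *adr;                        // only when do_load
//     if (pre_val != nullptr) {
//       if (index != 0) {                        // queue not yet full
//         index -= sizeof(intptr_t);
//         *(oop*)(buffer + index) = pre_val;     // index is in bytes
//       } else {
//         ShenandoahRuntime::write_ref_field_pre_entry(pre_val, thread);
//       }
//     }
//   }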
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(adr != nullptr, "where are we loading from?");
    assert(pre_val == nullptr, "loaded already?");
    assert(val_type != nullptr, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != nullptr, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != nullptr)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != nullptr)
  } __ end_if();  // (marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != nullptr) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
  if (!call->is_CallLeaf()) {
    return false;
  }

  address entry_point = call->as_CallLeaf()->entry_point();
  return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when Shenandoah
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset()?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != nullptr && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset, but not the referent offset, so just return.
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != nullptr) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != nullptr) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->instance_klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset()) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update GraphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from GraphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == nullptr
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update GraphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        satb_write_barrier_pre(kit, false /* do_load */,
                               nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                               pre_val /* pre_val */,
                               T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoints, since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from GraphKit.
        __ sync_kit(kit);

      } __ end_if(); // is_instof == 1
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
  CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base();
  if (card_table_base != nullptr) {
    return kit->makecon(TypeRawPtr::make((address)card_table_base));
  } else {
    return kit->null();
  }
}

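// Card-mark post barrier, used only by generational Shenandoah to keep the
// remembered set current. A sketch of the emitted shape (pseudocode; 0 is
// the dirty card value stored below):
//
//   card = &card_table_base[uintptr_t(adr) >> card_shift];
//   if (!UseCondCardMark || *card != 0) {
//     *card = 0;   // dirty the card covering the updated location
//   }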
void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
                                          Node* ctl,
                                          Node* oop_store,
                                          Node* obj,
                                          Node* adr,
                                          uint  adr_idx,
                                          Node* val,
                                          BasicType bt,
                                          bool use_precise) const {
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    return;
  }

  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
  CardTable* ct = ctbs->card_table();
  // No store check needed if we're storing a null or an old object
  // (latter case is probably a string constant). Generational Shenandoah,
  // however, needs all non-null oop updates flagged via card marks, so
  // that the remembered set stays current.
  if (val != nullptr && val->is_Con()) {
    // must be either an oop or null
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      // stores of null never need barriers
      return;
  }

  if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != nullptr, "");

  IdealKit ideal(kit, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift()));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset);

  // Get the alias_index for raw card-mark memory
  int alias_idx = Compile::AliasIdxRaw;
  Node* zero = __ ConI(0); // Dirty card value

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table.  Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores.  In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, alias_idx);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  __ store(__ ctrl(), card_adr, zero, T_BYTE, alias_idx, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

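// Call signatures for the barrier runtime entries. The domain/range tuples
// below are meant to mirror the C++ entry points in ShenandoahRuntime; for
// example, the pre-barrier entry corresponds (roughly) to:
//
//   void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);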
const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

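// Oop stores get their barriers emitted in this order (parse-time path;
// sketch):
//
//   val' = IU-barrier(val)               // if ShenandoahIUBarrier
//   SATB pre-barrier (loads old value)   // if ShenandoahSATBBarrier
//   the store itself
//   card-mark post barrier               // generational mode only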
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  if (!access.is_oop()) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();

    uint adr_idx = kit->C->get_alias_index(adr_type);
    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
    Node* value = val.node();
    value = shenandoah_iu_barrier(kit, value);
    val.set_node(value);
    shenandoah_write_barrier_pre(kit, true /* do_load */, access.base(), adr, adr_idx, val.node(),
                                 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());

    Node* result = BarrierSetC2::store_at_resolved(access, val);

    bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool is_array = (decorators & IS_ARRAY) != 0;
    bool use_precise = is_array || anonymous;
    post_barrier(kit, kit->control(), access.raw_access(), access.base(), adr, adr_idx, val.node(), access.type(), use_precise);
    return result;
  } else {
    assert(access.is_opt_access(), "only for optimization passes");
    assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    PhaseGVN& gvn = opt_access.gvn();

    if (ShenandoahIUBarrier) {
      Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
      val.set_node(enqueue);
    }
    return BarrierSetC2::store_at_resolved(access, val);
  }
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    return BarrierSetC2::load_at_resolved(access, val_type);
  }

  Node* load = BarrierSetC2::load_at_resolved(access, val_type);
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
    if (access.is_parse_access()) {
      load = static_cast<C2ParseAccess&>(access).kit()->gvn().transform(load);
    } else {
      load = static_cast<C2OptAccess&>(access).gvn().transform(load);
    }
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    Node* top = Compile::current()->top();
    Node* adr = access.addr().node();
    Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
    Node* obj = access.base();

    bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;

    // If we are reading the value of the referent field of a Reference
    // object (either by using Unsafe directly or through reflection)
    // then, if SATB is enabled, we need to record the referent in an
    // SATB log buffer using the pre-barrier mechanism.
    // We also need to add a memory barrier to prevent commoning reads
    // from this field across safepoints, since GC can change its value.
    if (!on_weak_ref || (unknown && (offset == top || obj == top)) || !keep_alive) {
      return load;
    }

    assert(access.is_parse_access(), "entry not supported at optimization time");
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
    bool is_unordered = (decorators & MO_UNORDERED) != 0;
    bool in_native = (decorators & IN_NATIVE) != 0;
    bool need_cpu_mem_bar = !is_unordered || mismatched || in_native;

    if (on_weak_ref) {
      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                             load /* pre_val */, T_OBJECT);
      // Add memory barrier to prevent commoning reads from this field
      // across safepoints, since GC can change its value.
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    } else if (unknown) {
      // We do not require a mem bar inside pre_barrier if need_mem_bar
      // is set: the barriers would be emitted by us.
      insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
    }
  }

  return load;
}

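// CAS on an oop field. The expected value doubles as the SATB pre-value, so
// no extra load is needed. With ShenandoahCASBarrier, the platform CAS node
// is replaced by a Shenandoah variant that also copes with the expected oop
// being present in memory as a from-space copy, and the returned oop is
// routed through a load-reference barrier.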
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_iu_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = nullptr;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
    post_barrier(kit, kit->control(), access.raw_access(), access.base(), access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_iu_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = nullptr;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node* oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    post_barrier(kit, kit->control(), access.raw_access(), access.base(),
                 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_iu_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 result /* pre_val */, T_OBJECT);
    post_barrier(kit, kit->control(), access.raw_access(), access.base(),
                 access.addr().node(), access.alias_idx(), val, T_OBJECT, true);
  }
  return result;
}

bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
  return is_shenandoah_wb_pre_call(node);
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier || node->Opcode() == Op_ShenandoahIUBarrier) return true;
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode* call = node->as_CallLeaf();
  if (call->_name == nullptr) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c == nullptr) {
    return c;
  }
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  if (c->Opcode() == Op_ShenandoahIUBarrier) {
    c = c->in(1);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = is_reference_type(type);
  if (!is_oop) {
    return false;
  }
  if (ShenandoahSATBBarrier && tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  if (phase == Optimization) {
    return !ShenandoahIUBarrier;
  }
  return true;
}

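// Returns true if cloning src may copy oop fields, in which case the clone
// barrier is required. Examples of the decision (sketch):
//
//   int[]                                  -> false (no oops to fix up)
//   exact/leaf instance with no oop fields -> false (a leaf-type dependency
//                                             is recorded if not exact)
//   Object[] or statically unknown type    -> true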
bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
  const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != nullptr) {
    ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
    if ((src_type->klass_is_exact() || !ik->has_subklass()) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          Compile::current()->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

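// Macro expansion of an oop-copying clone. A sketch of the emitted shape:
//
//   if (thread->gc_state & (HAS_FORWARDED [| YOUNG_MARKING])) {
//     ShenandoahRuntime::shenandoah_clone_barrier(src_base); // fix src oops
//   }
//   fast_arraycopy(src, dest, length);  // word-wise copy via the T_LONG stub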
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src_base = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest_base = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* src = phase->basic_plus_adr(src_base, src_offset);
  Node* dest = phase->basic_plus_adr(dest_base, dest_offset);

  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
    // Check if the heap has forwarded objects. If it does, we need to call into the special
    // routine that would fix up source references before we can continue.

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* thread = phase->transform_later(new ThreadLocalNode());
    Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

    uint gc_state_idx = Compile::AliasIdxRaw;
    const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
    debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

    Node* gc_state    = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
    int flags = ShenandoahHeap::HAS_FORWARDED;
    if (ShenandoahIUBarrier) {
      flags |= ShenandoahHeap::YOUNG_MARKING;
    }
    Node* stable_and  = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(flags)));
    Node* stable_cmp  = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));

    IfNode* stable_iff  = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
    Node* stable_ctrl   = phase->transform_later(new IfFalseNode(stable_iff));
    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));

    // Heap is stable, no need to do anything additional
    region->init_req(_heap_stable, stable_ctrl);
    mem_phi->init_req(_heap_stable, mem);

    // Heap is unstable, call into clone barrier stub
    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
                    ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                    CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                    "shenandoah_clone",
                    TypeRawPtr::BOTTOM,
                    src_base);
    call = phase->transform_later(call);

    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
    region->init_req(_heap_unstable, ctrl);
    mem_phi->init_req(_heap_unstable, mem);

    // Wire up the actual arraycopy stub now
    ctrl = phase->transform_later(region);
    mem = phase->transform_later(mem_phi);

    const char* name = "arraycopy";
    call = phase->make_leaf_call(ctrl, mem,
                                 OptoRuntime::fast_arraycopy_Type(),
                                 phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, name, true),
                                 name, TypeRawPtr::BOTTOM,
                                 src, dest, length
                                 LP64_ONLY(COMMA phase->top()));
    call = phase->transform_later(call);

    // Hook up the whole thing into the graph
    phase->igvn().replace_node(ac, call);
  } else {
    BarrierSetC2::clone_at_expansion(phase, ac);
  }
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahIUBarrier) {
    state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahIUBarrier) {
    state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  if (is_shenandoah_wb_pre_call(node)) {
    shenandoah_eliminate_wb_pre(node, &macro->igvn());
  }
  if (node->Opcode() == Op_CastP2X && ShenandoahHeap::heap()->mode()->is_generational()) {
    // This is a card-mark shape being eliminated: walk from the CastP2X
    // through the shift and AddP to the card loads/stores and unlink them.
    Node* shift = node->unique_out();
    Node* addp = shift->unique_out();
    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
      Node* mem = addp->last_out(j);
      if (UseCondCardMark && mem->is_Load()) {
        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
        // The load is checking if the card has been written so
        // replace it with zero to fold the test.
        macro->replace_node(mem, macro->intcon(0));
        continue;
      }
      assert(mem->is_Store(), "store required");
      macro->replace_node(mem, mem->in(MemNode::Memory));
    }
  }
}

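// Eliminate a pre-barrier call whose base object proved non-escaping: the
// gc-state marking test guarding the slow path is replaced by a constant
// "equal" compare so that IGVN folds the whole barrier away, and the extra
// address input appended in satb_write_barrier_pre is dropped from the call.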
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    igvn->add_users_to_worklist(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        C->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->iu_barriers_count() - 1; i >= 0; i--) {
    ShenandoahIUBarrierNode* n = state()->iu_barrier(i);
    if (!useful.member(n)) {
      state()->remove_iu_barrier(n);
    }
  }
  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeMacroExpand) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  } else if (phase == BarrierSetC2::BeforeCodeGen) {
    // Verify Shenandoah pre-barriers
    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());

    Unique_Node_List visited;
    Node_List worklist;
    // We're going to walk control flow backwards starting from the Root
    worklist.push(compile->root());
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == nullptr || x == compile->top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See GraphKit::g1_write_barrier_pre()
        if (x->is_If()) {
          IfNode* iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode* cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode* load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

                Node* if_ctrl = iff->in(0);
                Node* load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && if_ctrl->in(0)->is_NeverBranch()) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}
#endif

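// Ideal transformations for Shenandoah-specific shapes:
//  - drop the extra AddP input from a pre-barrier call once that address is
//    only used by such calls (it was kept alive for barrier elimination);
//  - let CmpP against null look through strong LRBs: a strong LRB maps null
//    to null and non-null to non-null, so the comparison is unaffected;
//  - fold a heap-stable test that is dominated by an identical test.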
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);

    // If one input is null, then step over the strong LRB barriers on the other input
    if (in1->bottom_type() == TypePtr::NULL_PTR &&
        !((in2->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
          !ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in2)->decorators()))) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR &&
        !((in1->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
          !ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in1)->decorators()))) {
      in1 = step_over_gc_barrier(in1);
    }

    if (in1 != n->in(1)) {
      n->set_req_X(1, in1, phase);
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      n->set_req_X(2, in2, phase);
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != nullptr &&
             n->outcnt() == 2) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op    ||  // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) ||  // Not same input 1?
           prev_dom->in(0) != dom) {  // One path of test does not dominate?
      if (dist < 0) return nullptr;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return nullptr;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return nullptr;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
  }

  return nullptr;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode, Unique_Node_List& dead_nodes) const {
  switch (opcode) {
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert(n->is_Call(), "");
      CallNode* call = n->as_Call();
      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
        if (call->req() > cnt) {
          assert(call->req() == cnt + 1, "only one extra input");
          Node* addp = call->in(cnt);
          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
          call->del_req(cnt);
        }
      }
      return false;
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      assert(false, "should have been expanded already");
      return true;
    default:
      return false;
  }
}

bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
      conn_graph->add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      return true;
    case Op_StoreP: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = gvn->type(adr);
      // Pointer stores in Shenandoah barriers look like unsafe accesses.
      // Ignore such stores so that non-escaping allocations can still be
      // scalar-replaced.
      if (adr_type->isa_rawptr() && adr->is_AddP()) {
        Node* base = conn_graph->get_addp_base(adr);
        if (base->Opcode() == Op_LoadP &&
            base->in(MemNode::Address)->is_AddP()) {
          adr = base->in(MemNode::Address);
          Node* tls = conn_graph->get_addp_base(adr);
          if (tls->Opcode() == Op_ThreadLocal) {
            int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
            const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
            if (offs == buf_offset) {
              return true; // Pre barrier previous oop value store.
            }
          }
        }
      }
      return false;
    }
    case Op_ShenandoahIUBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN: {
      Node* adr = n->in(MemNode::Address);
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, nullptr);
      // fallthrough
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
      return conn_graph->add_final_edges_unsafe_access(n, opcode);
    case Op_ShenandoahIUBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), nullptr);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
  return n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
         n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN);
}

bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN: {   // Convert trinary to binary-tree
      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair = new BinaryNode(oldval, newval);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    default:
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) const {
  return xop == Op_ShenandoahCompareAndExchangeP ||
         xop == Op_ShenandoahCompareAndExchangeN ||
         xop == Op_ShenandoahWeakCompareAndSwapP ||
         xop == Op_ShenandoahWeakCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapP;
}