/*
 * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}

int ShenandoahBarrierSetC2State::iu_barriers_count() const {
  return _iu_barriers->length();
}

ShenandoahIUBarrierNode* ShenandoahBarrierSetC2State::iu_barrier(int idx) const {
  return _iu_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_iu_barrier(ShenandoahIUBarrierNode* n) {
  assert(!_iu_barriers->contains(n), "duplicate entry in barrier list");
  _iu_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_iu_barrier(ShenandoahIUBarrierNode* n) {
  _iu_barriers->remove_if_existing(n);
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  _load_reference_barriers->remove_if_existing(n);
}

Node* ShenandoahBarrierSetC2::shenandoah_iu_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahIUBarrier) {
    return kit->gvn().transform(new ShenandoahIUBarrierNode(obj));
  }
  return obj;
}

#define __ kit->

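// Returns true if the SATB pre-barrier for a store through adr can be elided:
// the store targets a slot of an object allocated in this compilation unit,
// and walking the memory graph upwards shows the slot still holds its initial
// (null) value, so there is no previous reference to log. Roughly, this covers
// shapes like the following (illustrative sketch):
//
//   t = new T();   // allocation found via Ideal_allocation()
//   ...            // only provably independent stores in between
//   t.f = v;       // t.f never held a non-null oop, nothing to log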
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseValues* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // alloc is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing null, so that no previous
        // store has been moved up to directly write a reference
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

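// Emit the SATB (snapshot-at-the-beginning) pre-barrier. The pseudo-code
// generated below is roughly (names illustrative):
//
//   if ((thread->gc_state & MARKING) != 0) {
//     pre_val = *adr;                                 // only if do_load
//     if (pre_val != nullptr) {
//       if (index != 0) {                             // queue not full?
//         index -= sizeof(intptr_t);
//         buffer[index] = pre_val;                    // log previous value
//       } else {
//         write_ref_field_pre_entry(pre_val, thread); // slow path
//       }
//     }
//   }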
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(adr != nullptr, "where are we loading from?");
    assert(pre_val == nullptr, "loaded already?");
    assert(val_type != nullptr, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != nullptr, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (marking != 0)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != nullptr)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != nullptr)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != nullptr) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_clone_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier);
}

bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
  if (!call->is_CallLeaf()) {
    return false;
  }

  address entry_point = call->as_CallLeaf()->entry_point();
  return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow));
}

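// Recognizes the marking test emitted by satb_write_barrier_pre, i.e. the IR
// shape of "if ((gc_state & MARKING) != 0)" (illustrative sketch):
//
//   If -> Bool(ne) -> CmpI(AndI(LoadB(gc_state), ConI(MARKING)), ConI(0))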
bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseValues* phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when Shenandoah
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset()?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != nullptr && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset but not the referent offset, so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != nullptr) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != nullptr) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->instance_klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset()) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update GraphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from GraphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == nullptr
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update GraphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        satb_write_barrier_pre(kit, false /* do_load */,
                               nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                               pre_val /* pre_val */,
                               T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from GraphKit.
        __ sync_kit(kit);

      } __ end_if(); // is_instof == 1
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
  CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base();
  if (card_table_base != nullptr) {
    return kit->makecon(TypeRawPtr::make((address)card_table_base));
  } else {
    return kit->null();
  }
}

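// Emits the card-marking post-barrier. In pseudo-code (illustrative):
//
//   card_adr = byte_map_base + ((uintptr_t)adr >> card_shift);
//   if (!UseCondCardMark || *card_adr != dirty) {
//     *card_adr = dirty;   // the dirty value is 0
//   }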
void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
                                          Node* ctl,
                                          Node* oop_store,
                                          Node* obj,
                                          Node* adr,
                                          uint  adr_idx,
                                          Node* val,
                                          BasicType bt,
                                          bool use_precise) const {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // No store check needed if we're storing a null.
  if (val != nullptr && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      return;
  }

  if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != nullptr, "");

  IdealKit ideal(kit, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift()));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset);

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node*   zero = __ ConI(0); // Dirty card value

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table.  Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores.  In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  if (!access.is_oop()) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();

    uint adr_idx = kit->C->get_alias_index(adr_type);
    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
    Node* value = val.node();
    value = shenandoah_iu_barrier(kit, value);
    val.set_node(value);
    shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                                 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());

    Node* result = BarrierSetC2::store_at_resolved(access, val);

    if (ShenandoahCardBarrier) {
      const bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
      const bool is_array = (decorators & IS_ARRAY) != 0;
      const bool use_precise = is_array || anonymous;
      post_barrier(kit, kit->control(), access.raw_access(), access.base(),
                   adr, adr_idx, val.node(), access.type(), use_precise);
    }
    return result;
  } else {
    assert(access.is_opt_access(), "only for optimization passes");
    assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    PhaseGVN& gvn = opt_access.gvn();

    if (ShenandoahIUBarrier) {
      Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
      val.set_node(enqueue);
    }
    return BarrierSetC2::store_at_resolved(access, val);
  }
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    return BarrierSetC2::load_at_resolved(access, val_type);
  }

  Node* load = BarrierSetC2::load_at_resolved(access, val_type);
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
    if (access.is_parse_access()) {
      load = static_cast<C2ParseAccess&>(access).kit()->gvn().transform(load);
    } else {
      load = static_cast<C2OptAccess&>(access).gvn().transform(load);
    }
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    Node* top = Compile::current()->top();
    Node* adr = access.addr().node();
    Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
    Node* obj = access.base();

    bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;

    // If we are reading the value of the referent field of a Reference
    // object (either by using Unsafe directly or through reflection)
    // then, if SATB is enabled, we need to record the referent in an
    // SATB log buffer using the pre-barrier mechanism.
    // We also need to add a memory barrier to prevent commoning reads
    // from this field across safepoints, since GC can change its value.
    if (!on_weak_ref || (unknown && (offset == top || obj == top)) || !keep_alive) {
      return load;
    }

    assert(access.is_parse_access(), "entry not supported at optimization time");
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
    bool is_unordered = (decorators & MO_UNORDERED) != 0;
    bool in_native = (decorators & IN_NATIVE) != 0;
    bool need_cpu_mem_bar = !is_unordered || mismatched || in_native;

    if (on_weak_ref) {
      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
                             load /* pre_val */, T_OBJECT);
      // Add memory barrier to prevent commoning reads from this field
      // across safepoint since GC can change its value.
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    } else if (unknown) {
      // We do not require a memory barrier inside the pre-barrier when
      // need_cpu_mem_bar is set: in that case the barriers are emitted here.
      insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
    }
  }

  return load;
}

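// For oop values, the CAS below expands roughly to (illustrative pseudo-code):
//
//   new_val = iu_barrier(new_val);              // enqueue new value if IU barrier is on
//   satb_pre(expected_val);                     // SATB-log the previous value
//   old = cmpxchg(adr, expected_val, new_val);  // the Shenandoah CAS node also
//                                               // resolves forwarded objects
//   old = load_reference_barrier(old);
//   if (ShenandoahCardBarrier) card_mark(adr);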
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_iu_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = nullptr;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
    if (ShenandoahCardBarrier) {
      post_barrier(kit, kit->control(), access.raw_access(), access.base(),
                   access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
    }
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_iu_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = nullptr;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    if (ShenandoahCardBarrier) {
      post_barrier(kit, kit->control(), access.raw_access(), access.base(),
                   access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
    }
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_iu_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 nullptr, nullptr, max_juint, nullptr, nullptr,
                                 result /* pre_val */, T_OBJECT);
    if (ShenandoahCardBarrier) {
      post_barrier(kit, kit->control(), access.raw_access(), access.base(),
                   access.addr().node(), access.alias_idx(), val, T_OBJECT, true);
    }
  }
  return result;
}

bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
  return is_shenandoah_wb_pre_call(node);
}

bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  return (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) || (node->Opcode() == Op_ShenandoahIUBarrier) ||
         is_shenandoah_lrb_call(node) ||
         is_shenandoah_wb_pre_call(node) ||
         is_shenandoah_clone_call(node);
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c == nullptr) {
    return c;
  }
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  if (c->Opcode() == Op_ShenandoahIUBarrier) {
    c = c->in(1);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

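// A summary of the decision below: primitive copies never need barriers; with
// SATB enabled on a tightly coupled allocation, only non-clone copies need
// them, and only at expansion time; otherwise barriers are needed, except at
// Optimization time when ShenandoahIUBarrier is enabled.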
bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = is_reference_type(type);
  if (!is_oop) {
    return false;
  }
  if (ShenandoahSATBBarrier && tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  if (phase == Optimization) {
    return !ShenandoahIUBarrier;
  }
  return true;
}

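// Decide whether a clone of 'src' may copy oop fields and therefore needs the
// clone barrier: instances that have (or, for inexact types, may have) object
// fields, oop arrays, and statically unknown types all do; provably oop-free
// instances and primitive arrays do not.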
bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
  const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != nullptr) {
    ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
    if ((src_type->klass_is_exact() || !ik->has_subklass()) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          Compile::current()->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src_base = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest_base = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* src = phase->basic_plus_adr(src_base, src_offset);
  Node* dest = phase->basic_plus_adr(dest_base, dest_offset);

  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
    // Check if the heap has forwarded objects. If it does, we need to call into the special
    // routine that will fix up source references before we can continue.
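    // In pseudo-code, the expansion below is roughly (illustrative; MARKING is
    // tested only when ShenandoahIUBarrier is enabled):
    //
    //   if ((thread->gc_state & (HAS_FORWARDED | MARKING)) != 0) {
    //     shenandoah_clone_barrier(src_base);  // fix up source references
    //   }
    //   fast_arraycopy(src, dest, length);     // word-wise payload copy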

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* thread = phase->transform_later(new ThreadLocalNode());
    Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

    uint gc_state_idx = Compile::AliasIdxRaw;
    const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
    debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

    Node* gc_state    = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
    int flags = ShenandoahHeap::HAS_FORWARDED;
    if (ShenandoahIUBarrier) {
      flags |= ShenandoahHeap::MARKING;
    }
    Node* stable_and  = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(flags)));
    Node* stable_cmp  = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));

    IfNode* stable_iff  = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
    Node* stable_ctrl   = phase->transform_later(new IfFalseNode(stable_iff));
    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));

    // Heap is stable, no need to do anything additional
    region->init_req(_heap_stable, stable_ctrl);
    mem_phi->init_req(_heap_stable, mem);

    // Heap is unstable, call into clone barrier stub
    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
                    ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                    CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                    "shenandoah_clone",
                    TypeRawPtr::BOTTOM,
                    src_base);
    call = phase->transform_later(call);

    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
    region->init_req(_heap_unstable, ctrl);
    mem_phi->init_req(_heap_unstable, mem);

    // Wire up the actual arraycopy stub now
    ctrl = phase->transform_later(region);
    mem = phase->transform_later(mem_phi);

    const char* name = "arraycopy";
    call = phase->make_leaf_call(ctrl, mem,
                                 OptoRuntime::fast_arraycopy_Type(),
                                 phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, name, true),
                                 name, TypeRawPtr::BOTTOM,
                                 src, dest, length
                                 LP64_ONLY(COMMA phase->top()));
    call = phase->transform_later(call);

    // Hook up the whole thing into the graph
    phase->igvn().replace_node(ac, call);
  } else {
    BarrierSetC2::clone_at_expansion(phase, ac);
  }
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahIUBarrier) {
    state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahIUBarrier) {
    state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  if (is_shenandoah_wb_pre_call(node)) {
    shenandoah_eliminate_wb_pre(node, &macro->igvn());
  }
  if (ShenandoahCardBarrier && node->Opcode() == Op_CastP2X) {
    Node* shift = node->unique_out();
    Node* addp = shift->unique_out();
    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
      Node* mem = addp->last_out(j);
      if (UseCondCardMark && mem->is_Load()) {
        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
        // The load is checking if the card has been written so
        // replace it with zero to fold the test.
        macro->replace_node(mem, macro->intcon(0));
        continue;
      }
      assert(mem->is_Store(), "store required");
      macro->replace_node(mem, mem->in(MemNode::Memory));
    }
  }
}

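// Eliminates a pre-barrier call that has become redundant (e.g. when the
// allocation it guards is scalar-replaced): fold the marking test to a
// constant so the barrier body goes dead, and drop the trailing address
// input that satb_write_barrier_pre attached to the call.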
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    igvn->add_users_to_worklist(node);
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        C->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->iu_barriers_count() - 1; i >= 0; i--) {
    ShenandoahIUBarrierNode* n = state()->iu_barrier(i);
    if (!useful.member(n)) {
      state()->remove_iu_barrier(n);
    }
  }
  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeMacroExpand) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  } else if (phase == BarrierSetC2::BeforeCodeGen) {
    // Verify Shenandoah pre-barriers
    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());

    Unique_Node_List visited;
    Node_List worklist;
    // We're going to walk control flow backwards starting from the Root
    worklist.push(compile->root());
    while (worklist.size() > 0) {
      Node *x = worklist.pop();
      if (x == nullptr || x == compile->top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See GraphKit::g1_write_barrier_pre()
        if (x->is_If()) {
          IfNode *iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode *load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

                Node *if_ctrl = iff->in(0);
                Node *load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && if_ctrl->in(0)->is_NeverBranch()) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}
#endif

Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);

    // If one input is null, then step over the strong LRB barriers on the other input
    if (in1->bottom_type() == TypePtr::NULL_PTR &&
        !((in2->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
          !ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in2)->decorators()))) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR &&
        !((in1->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
          !ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in1)->decorators()))) {
      in1 = step_over_gc_barrier(in1);
    }

    if (in1 != n->in(1)) {
      n->set_req_X(1, in1, phase);
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      n->set_req_X(2, in2, phase);
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != nullptr &&
             n->outcnt() == 2) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op    ||  // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) ||  // Not same input 1?
           prev_dom->in(0) != dom) {  // One path of test does not dominate?
      if (dist < 0) return nullptr;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return nullptr;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return nullptr;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN(), false);
  }

  return nullptr;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode, Unique_Node_List& dead_nodes) const {
  switch (opcode) {
    case Op_CallLeaf:
    case Op_CallLeafNoFP: {
      assert (n->is_Call(), "");
      CallNode *call = n->as_Call();
      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
        if (call->req() > cnt) {
          assert(call->req() == cnt + 1, "only one extra input");
          Node *addp = call->in(cnt);
          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
          call->del_req(cnt);
        }
      }
      return false;
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      assert(false, "should have been expanded already");
      return true;
    default:
      return false;
  }
}

bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
      conn_graph->add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      return true;
    case Op_StoreP: {
      Node* adr = n->in(MemNode::Address);
      const Type* adr_type = gvn->type(adr);
      // Pointer stores in Shenandoah barriers look like unsafe accesses.
      // Ignore such stores so that non-escaping allocations can still be
      // scalar-replaced.
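      // The shape ignored below is the SATB buffer store emitted by the
      // pre-barrier (illustrative sketch):
      //
      //   StoreP(AddP(buffer, index), pre_val)
      //     where buffer = LoadP(AddP(ThreadLocal, satb_mark_queue_buffer_offset))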
      if (adr_type->isa_rawptr() && adr->is_AddP()) {
        Node* base = conn_graph->get_addp_base(adr);
        if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
          adr = base->in(MemNode::Address);
          Node* tls = conn_graph->get_addp_base(adr);
          if (tls->Opcode() == Op_ThreadLocal) {
             int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
             const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
             if (offs == buf_offset) {
               return true; // Pre barrier previous oop value store.
             }
          }
        }
      }
      return false;
    }
    case Op_ShenandoahIUBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN: {
      Node *adr = n->in(MemNode::Address);
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, nullptr);
      // fallthrough
    }
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
      return conn_graph->add_final_edges_unsafe_access(n, opcode);
    case Op_ShenandoahIUBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
      return true;
    case Op_ShenandoahLoadReferenceBarrier:
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), nullptr);
      return true;
    default:
      // Nothing
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
  return n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
         n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN);
}

bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN: {   // Convert trinary to binary-tree
      Node* newval = n->in(MemNode::ValueIn);
      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
      Node* pair = new BinaryNode(oldval, newval);
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(LoadStoreConditionalNode::ExpectedIn);
      return true;
    }
    default:
      break;
  }
  return false;
}

bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) const {
  return xop == Op_ShenandoahCompareAndExchangeP ||
         xop == Op_ShenandoahCompareAndExchangeN ||
         xop == Op_ShenandoahWeakCompareAndSwapP ||
         xop == Op_ShenandoahWeakCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapN ||
         xop == Op_ShenandoahCompareAndSwapP;
}