1 /*
   2  * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #if INCLUDE_CDS
  28 #include "code/SCCache.hpp"
  29 #endif
  30 #include "gc/g1/c2/g1BarrierSetC2.hpp"
  31 #include "gc/g1/g1BarrierSet.hpp"
  32 #include "gc/g1/g1BarrierSetRuntime.hpp"
  33 #include "gc/g1/g1CardTable.hpp"
  34 #include "gc/g1/g1ThreadLocalData.hpp"
  35 #include "gc/g1/g1HeapRegion.hpp"
  36 #include "opto/arraycopynode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/graphKit.hpp"
  40 #include "opto/idealKit.hpp"
  41 #include "opto/macro.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/type.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  47   const Type **fields = TypeTuple::fields(2);
  48   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  49   fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  50   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
  51 
  52   // create result type (range)
  53   fields = TypeTuple::fields(0);
  54   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  55 
  56   return TypeFunc::make(domain, range);
  57 }
  58 
  59 const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  60   const Type **fields = TypeTuple::fields(2);
  61   fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  62   fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  63   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
  64 
  65   // create result type (range)
  66   fields = TypeTuple::fields(0);
  67   const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  68 
  69   return TypeFunc::make(domain, range);
  70 }
  71 
  72 #define __ ideal.
  73 /*
  74  * Determine if the G1 pre-barrier can be removed. The pre-barrier is
  75  * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; every reference update therefore needs to log
 * the previous reference value before the write.
  78  *
  79  * If the previous value is null there is no need to save the old value.
  80  * References that are null are filtered during runtime by the barrier
  81  * code to avoid unnecessary queuing.
  82  *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is null,
 * and avoid adding the barrier code completely.
  86  *
  87  * The compiler needs to determine that the object in which a field is about
  88  * to be written is newly allocated, and that no prior store to the same field
  89  * has happened since the allocation.
  90  *
  91  * Returns true if the pre-barrier can be removed
  92  */
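//
// Illustrative sketch only (the class and field names below are made up):
// the typical source-level pattern this routine tries to recognize is a
// store into a freshly allocated object whose field still holds its default
// null value, for example
//
//   Foo f = new Foo();   // allocation; reference fields start out null
//   f.ref = x;           // previous value of f.ref is provably null, so
//                        // no SATB logging of the old value is needed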
  93 bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
  94                                                PhaseValues* phase,
  95                                                Node* adr,
  96                                                BasicType bt,
  97                                                uint adr_idx) const {
  98   intptr_t offset = 0;
  99   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
 100   AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
 101 
 102   if (offset == Type::OffsetBot) {
 103     return false; // cannot unalias unless there are precise offsets
 104   }
 105 
 106   if (alloc == nullptr) {
 107     return false; // No allocation found
 108   }
 109 
 110   intptr_t size_in_bytes = type2aelembytes(bt);
 111 
 112   Node* mem = kit->memory(adr_idx); // start searching here...
 113 
 114   for (int cnt = 0; cnt < 50; cnt++) {
 115 
 116     if (mem->is_Store()) {
 117 
 118       Node* st_adr = mem->in(MemNode::Address);
 119       intptr_t st_offset = 0;
 120       Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
 121 
 122       if (st_base == nullptr) {
 123         break; // inscrutable pointer
 124       }
 125 
      // We have found a store with the same base and offset as ours, so break.
 127       if (st_base == base && st_offset == offset) {
 128         break;
 129       }
 130 
 131       if (st_offset != offset && st_offset != Type::OffsetBot) {
 132         const int MAX_STORE = BytesPerLong;
 133         if (st_offset >= offset + size_in_bytes ||
 134             st_offset <= offset - MAX_STORE ||
 135             st_offset <= offset - mem->as_Store()->memory_size()) {
 136           // Success:  The offsets are provably independent.
 137           // (You may ask, why not just test st_offset != offset and be done?
 138           // The answer is that stores of different sizes can co-exist
 139           // in the same sequence of RawMem effects.  We sometimes initialize
 140           // a whole 'tile' of array elements with a single jint or jlong.)
 141           mem = mem->in(MemNode::Memory);
 142           continue; // advance through independent store memory
 143         }
 144       }
 145 
 146       if (st_base != base
 147           && MemNode::detect_ptr_independence(base, alloc, st_base,
 148                                               AllocateNode::Ideal_allocation(st_base),
 149                                               phase)) {
 150         // Success:  The bases are provably independent.
 151         mem = mem->in(MemNode::Memory);
 152         continue; // advance through independent store memory
 153       }
 154     } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
 155 
 156       InitializeNode* st_init = mem->in(0)->as_Initialize();
 157       AllocateNode* st_alloc = st_init->allocation();
 158 
 159       // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here due to the earlier check.
 161       if (alloc == st_alloc) {
        // Check that the initialization is storing null, so that no previous store
        // has been moved up and directly writes a reference.
 164         Node* captured_store = st_init->find_captured_store(offset,
 165                                                             type2aelembytes(T_OBJECT),
 166                                                             phase);
 167         if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
 168           return true;
 169         }
 170       }
 171     }
 172 
 173     // Unless there is an explicit 'continue', we must bail out here,
 174     // because 'mem' is an inscrutable memory state (e.g., a call).
 175     break;
 176   }
 177 
 178   return false;
 179 }
 180 
 181 // G1 pre/post barriers
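//
// The IdealKit code in pre_barrier() below emits, roughly, the following
// SATB filtering logic; this is an illustrative sketch of the generated
// shape, not literal code:
//
//   if (thread->satb_mark_queue_active() != 0) {        // marking in progress?
//     pre_val = *adr;                                    // previous value (loaded here if do_load)
//     if (pre_val != nullptr) {
//       if (index != 0) {                                // room left in the SATB buffer?
//         index -= sizeof(intptr_t);
//         *(buffer + index) = pre_val;                   // log the previous value
//       } else {
//         write_ref_field_pre_entry(pre_val, thread);    // runtime slow path
//       }
//     }
//   }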
 182 void G1BarrierSetC2::pre_barrier(GraphKit* kit,
 183                                  bool do_load,
 184                                  Node* ctl,
 185                                  Node* obj,
 186                                  Node* adr,
 187                                  uint alias_idx,
 188                                  Node* val,
 189                                  const TypeOopPtr* val_type,
 190                                  Node* pre_val,
 191                                  BasicType bt) const {
 192   // Some sanity checks
 193   // Note: val is unused in this routine.
 194 
 195   if (do_load) {
 196     // We need to generate the load of the previous value
 197     assert(obj != nullptr, "must have a base");
    assert(adr != nullptr, "where are we loading from?");
 199     assert(pre_val == nullptr, "loaded already?");
 200     assert(val_type != nullptr, "need a type");
 201 
 202     if (use_ReduceInitialCardMarks()
 203         && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
 204       return;
 205     }
 206 
 207   } else {
 208     // In this case both val_type and alias_idx are unused.
 209     assert(pre_val != nullptr, "must be loaded already");
 210     // Nothing to be done if pre_val is null.
 211     if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
 212     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
 213   }
 214   assert(bt == T_OBJECT, "or we shouldn't be here");
 215 
 216   IdealKit ideal(kit, true);
 217 
 218   Node* tls = __ thread(); // ThreadLocalStorage
 219 
 220   Node* no_base = __ top();
 221   Node* zero  = __ ConI(0);
 222   Node* zeroX = __ ConX(0);
 223 
 224   float likely  = PROB_LIKELY(0.999);
 225   float unlikely  = PROB_UNLIKELY(0.999);
 226 
 227   BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
 228   assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");
 229 
 230   // Offsets into the thread
 231   const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
 232   const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
 233   const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
 234 
 235   // Now the actual pointers into the thread
 236   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
 237   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
 238   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
 239 
 240   // Now some of the values
 241   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
 242 
  // if (marking != 0)
 244   __ if_then(marking, BoolTest::ne, zero, unlikely); {
 245     BasicType index_bt = TypeX_X->basic_type();
 246     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
 247     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
 248 
 249     if (do_load) {
 250       // load original value
 251       pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx, false, MemNode::unordered, LoadNode::Pinned);
 252     }
 253 
 254     // if (pre_val != nullptr)
 255     __ if_then(pre_val, BoolTest::ne, kit->null()); {
 256       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 257 
 258       // is the queue for this thread full?
 259       __ if_then(index, BoolTest::ne, zeroX, likely); {
 260 
 261         // decrement the index
 262         Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
 263 
 264         // Now get the buffer location we will log the previous value into and store it
 265         Node *log_addr = __ AddP(no_base, buffer, next_index);
 266         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
 267         // update the index
 268         __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
 269 
 270       } __ else_(); {
 271 
 272         // logging buffer is full, call the runtime
 273         const TypeFunc *tf = write_ref_field_pre_entry_Type();
 274         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != nullptr)
  } __ end_if();  // (marking != 0)
 278 
 279   // Final sync IdealKit and GraphKit.
 280   kit->final_sync(ideal);
 281 }
 282 
 283 /*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure
 * all live objects are found. G1 also needs to keep track of object
 * references between different regions to enable evacuation of old regions,
 * which is done as part of mixed collections. References are tracked in
 * remembered sets, which are continuously updated as references are written,
 * with the help of the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the young generation,
 * updates where the written reference is in the same region as the object,
 * updates where null is being written, and updates where the card is already
 * marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this would happen for humongous objects.
 *
 * Returns true if the post barrier can be removed.
 307  */
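//
// Illustrative sketch only: the IR shape recognized below is, roughly, a
// store whose control input is a projection of the InitializeNode that
// belongs to the same allocation the store address is based on:
//
//   alloc = Allocate(...)
//   init  = Initialize(alloc)
//   Store(ctrl: Proj(init), adr: alloc + offset, val)
//
// so no safepoint can separate the allocation from the store.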
 308 bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
 309                                                 PhaseValues* phase, Node* store,
 310                                                 Node* adr) const {
 311   intptr_t      offset = 0;
 312   Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
 313   AllocateNode* alloc  = AllocateNode::Ideal_allocation(base);
 314 
 315   if (offset == Type::OffsetBot) {
 316     return false; // cannot unalias unless there are precise offsets
 317   }
 318 
 319   if (alloc == nullptr) {
 320      return false; // No allocation found
 321   }
 322 
 323   // Start search from Store node
 324   Node* mem = store->in(MemNode::Control);
 325   if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
 326 
 327     InitializeNode* st_init = mem->in(0)->as_Initialize();
 328     AllocateNode*  st_alloc = st_init->allocation();
 329 
 330     // Make sure we are looking at the same allocation
 331     if (alloc == st_alloc) {
 332       return true;
 333     }
 334   }
 335 
 336   return false;
 337 }
 338 
 339 //
 340 // Update the card table and add card address to the queue
 341 //
 342 void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
 343                                   IdealKit& ideal,
 344                                   Node* card_adr,
 345                                   Node* oop_store,
 346                                   uint oop_alias_idx,
 347                                   Node* index,
 348                                   Node* index_adr,
 349                                   Node* buffer,
 350                                   const TypeFunc* tf) const {
 351   Node* zero  = __ ConI(0);
 352   Node* zeroX = __ ConX(0);
 353   Node* no_base = __ top();
 354   BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
 356   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
 357 
 358   //  Now do the queue work
 359   __ if_then(index, BoolTest::ne, zeroX); {
 360 
 361     Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
 362     Node* log_addr = __ AddP(no_base, buffer, next_index);
 363 
 364     // Order, see storeCM.
 365     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
 366     __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
 367 
 368   } __ else_(); {
 369     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
 370   } __ end_if();
 371 
 372 }
 373 
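// The post_barrier() below emits, roughly, the following filtering logic;
// this is an illustrative sketch of the generated shape, not literal code:
//
//   if (((uintptr_t(adr) ^ uintptr_t(val)) >> LogOfHRGrainBytes) != 0) {  // cross-region store?
//     if (val != nullptr) {                                               // storing a non-null value?
//       card = card_table_base + (uintptr_t(adr) >> card_shift);
//       if (*card != g1_young_card_val()) {                               // not a young-gen card?
//         MemBarVolatile;
//         if (*card != dirty_card_val()) {                                // not already dirty?
//           *card = dirty_card_val();                                     // g1_mark_card()
//           enqueue card in the thread's dirty card queue,
//           or call write_ref_field_post_entry() if the queue is full;
//         }
//       }
//     }
//   }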
 374 void G1BarrierSetC2::post_barrier(GraphKit* kit,
 375                                   Node* ctl,
 376                                   Node* oop_store,
 377                                   Node* obj,
 378                                   Node* adr,
 379                                   uint alias_idx,
 380                                   Node* val,
 381                                   BasicType bt,
 382                                   bool use_precise) const {
 383   // If we are writing a null then we need no post barrier
 384 
 385   if (val != nullptr && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
 386     // Must be null
 387     const Type* t = val->bottom_type();
 388     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be null");
 389     // No post barrier if writing null
 390     return;
 391   }
 392 
 393   if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
 394     // We can skip marks on a freshly-allocated object in Eden.
 395     // Keep this code in sync with CardTableBarrierSet::on_slowpath_allocation_exit.
 396     // That routine informs GC to take appropriate compensating steps,
 397     // upon a slow-path allocation, so as to make this card-mark
 398     // elision safe.
 399     return;
 400   }
 401 
 402   if (use_ReduceInitialCardMarks()
 403       && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
 404     return;
 405   }
 406 
 407   if (!use_precise) {
 408     // All card marks for a (non-array) instance are in one place:
 409     adr = obj;
 410   }
 411   // (Else it's an array (or unknown), and we want more precise card marks.)
 412   assert(adr != nullptr, "");
 413 
 414   IdealKit ideal(kit, true);
 415 
 416   Node* tls = __ thread(); // ThreadLocalStorage
 417 
 418   Node* no_base = __ top();
 419   float likely = PROB_LIKELY_MAG(3);
 420   float unlikely = PROB_UNLIKELY_MAG(3);
 421   Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
 422   Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
 423   Node* zeroX = __ ConX(0);
 424 
 425   const TypeFunc *tf = write_ref_field_post_entry_Type();
 426 
 427   // Offsets into the thread
 428   const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
 429   const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
 430 
 431   // Pointers into the thread
 432 
 433   Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
 434   Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
 435 
 436   // Now some values
 437   // Use ctrl to avoid hoisting these values past a safepoint, which could
 438   // potentially reset these fields in the JavaThread.
 439   Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
 440   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 441 
 442   // Convert the store obj pointer to an int prior to doing math on it
 443   // Must use ctrl to prevent "integerized oop" existing across safepoint
 444   Node* cast =  __ CastPX(__ ctrl(), adr);
 445 
 446   Node* card_shift;
 447 #if INCLUDE_CDS
 448   if (SCCache::is_on_for_write()) {
 449     // load the card shift from the AOT Runtime Constants area
 450     Node* card_shift_adr =  __ makecon(TypeRawPtr::make(AOTRuntimeConstants::card_shift_address()));
 451     card_shift  = __ load(__ ctrl(), card_shift_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 452   } else
 453 #endif
 454   {
 455     card_shift = __ ConI(CardTable::card_shift());
 456   }
 457   // Divide pointer by card size
 458   Node* card_offset = __ URShiftX( cast, card_shift );
 459 
 460   // Combine card table base and card offset
 461   Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );
 462 
  // If we know the value being stored, check whether the store crosses regions.
 464 
 465   if (val != nullptr) {
 466     // Does the store cause us to cross regions?
 467 
 468     // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
 470     // Node* region_size = __ ConI(1 << G1HeapRegion::LogOfHRGrainBytes);
 471 #if INCLUDE_CDS
 472     Node* xor_res = __ XorX( cast,  __ CastPX(__ ctrl(), val));
 473     if (SCCache::is_on_for_write())  {
 474       // load the grain shift from the AOT Runtime Constants area
 475       Node* grain_shift_adr =  __ makecon(TypeRawPtr::make(AOTRuntimeConstants::grain_shift_address()));
 476       Node* grain_shift  = __ load(__ ctrl(), grain_shift_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 477       xor_res = __ URShiftX( xor_res, grain_shift);
 478     } else {
 479       xor_res = __ URShiftX ( xor_res, __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));
 480     }
 481 #else
 482     Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)));
 483 #endif
 484     // if (xor_res == 0) same region so skip
 485     __ if_then(xor_res, BoolTest::ne, zeroX, likely); {
 486 
 487       // No barrier if we are storing a null.
 488       __ if_then(val, BoolTest::ne, kit->null(), likely); {
 489 
        // OK, we must mark the card if it is not already dirty.
 491 
 492         // load the original value of the card
 493         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 494 
 495         __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
 496           kit->sync_kit(ideal);
 497           kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
 498           __ sync_kit(kit);
 499 
 500           Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 501           __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
 502             g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
 503           } __ end_if();
 504         } __ end_if();
 505       } __ end_if();
 506     } __ end_if();
 507   } else {
 508     // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
 509     // We don't need a barrier here if the destination is a newly allocated object
 510     // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
 511     // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
 512     assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
 513     Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 514     __ if_then(card_val, BoolTest::ne, young_card); {
 515       g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
 516     } __ end_if();
 517   }
 518 
 519   // Final sync IdealKit and GraphKit.
 520   kit->final_sync(ideal);
 521 }
 522 
 523 // Helper that guards and inserts a pre-barrier.
 524 void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
 525                                         Node* pre_val, bool need_mem_bar) const {
 526   // We could be accessing the referent field of a reference object. If so, when G1
 527   // is enabled, we need to log the value in the referent field in an SATB buffer.
 528   // This routine performs some compile time filters and generates suitable
 529   // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.
 532 
 533   // Some compile time checks.
 534 
  // If offset is a constant, is it java_lang_ref_Reference::referent_offset()?
 536   const TypeX* otype = offset->find_intptr_t_type();
 537   if (otype != nullptr && otype->is_con() &&
 538       otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset but not the referent offset, so just return
 540     return;
 541   }
 542 
 543   // We only need to generate the runtime guards for instances.
 544   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
 545   if (btype != nullptr) {
 546     if (btype->isa_aryptr()) {
 547       // Array type so nothing to do
 548       return;
 549     }
 550 
 551     const TypeInstPtr* itype = btype->isa_instptr();
 552     if (itype != nullptr) {
 553       // Can the klass of base_oop be statically determined to be
 554       // _not_ a sub-class of Reference and _not_ Object?
 555       ciKlass* klass = itype->instance_klass();
 556       if (klass->is_loaded() &&
 557           !klass->is_subtype_of(kit->env()->Reference_klass()) &&
 558           !kit->env()->Object_klass()->is_subtype_of(klass)) {
 559         return;
 560       }
 561     }
 562   }
 563 
 564   // The compile time filters did not reject base_oop/offset so
 565   // we need to generate the following runtime filters
 566   //
  //   if (offset == java_lang_ref_Reference::referent_offset()) {
 568   //   if (instance_of(base, java.lang.ref.Reference)) {
 569   //     pre_barrier(_, pre_val, ...);
 570   //   }
 571   // }
 572 
 573   float likely   = PROB_LIKELY(  0.999);
 574   float unlikely = PROB_UNLIKELY(0.999);
 575 
 576   IdealKit ideal(kit);
 577 
 578   Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());
 579 
 580   __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
 581       // Update graphKit memory and control from IdealKit.
 582       kit->sync_kit(ideal);
 583 
 584       Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
 585       Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);
 586 
 587       // Update IdealKit memory and control from graphKit.
 588       __ sync_kit(kit);
 589 
 590       Node* one = __ ConI(1);
 591       // is_instof == 0 if base_oop == nullptr
 592       __ if_then(is_instof, BoolTest::eq, one, unlikely); {
 593 
        // Update graphKit from IdealKit.
 595         kit->sync_kit(ideal);
 596 
 597         // Use the pre-barrier to record the value in the referent field
 598         pre_barrier(kit, false /* do_load */,
 599                     __ ctrl(),
 600                     nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
 601                     pre_val /* pre_val */,
 602                     T_OBJECT);
 603         if (need_mem_bar) {
 604           // Add memory barrier to prevent commoning reads from this field
 605           // across safepoint since GC can change its value.
 606           kit->insert_mem_bar(Op_MemBarCPUOrder);
 607         }
 608         // Update IdealKit from graphKit.
 609         __ sync_kit(kit);
 610 
      } __ end_if(); // (is_instof == 1)
 612   } __ end_if(); // offset == referent_offset
 613 
 614   // Final sync IdealKit and GraphKit.
 615   kit->final_sync(ideal);
 616 }
 617 
 618 #undef __
 619 
 620 Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 621   DecoratorSet decorators = access.decorators();
 622   Node* adr = access.addr().node();
 623   Node* obj = access.base();
 624 
 625   bool anonymous = (decorators & C2_UNSAFE_ACCESS) != 0;
 626   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 627   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 628   bool in_heap = (decorators & IN_HEAP) != 0;
 629   bool in_native = (decorators & IN_NATIVE) != 0;
 630   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
 631   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
 632   bool is_unordered = (decorators & MO_UNORDERED) != 0;
 633   bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
 634   bool is_mixed = !in_heap && !in_native;
 635   bool need_cpu_mem_bar = !is_unordered || mismatched || is_mixed;
 636 
 637   Node* top = Compile::current()->top();
 638   Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
 639 
 640   // If we are reading the value of the referent field of a Reference
 641   // object (either by using Unsafe directly or through reflection)
 642   // then, if G1 is enabled, we need to record the referent in an
 643   // SATB log buffer using the pre-barrier mechanism.
 644   // Also we need to add memory barrier to prevent commoning reads
 645   // from this field across safepoint since GC can change its value.
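  // Illustrative examples (not an exhaustive list): Reference.get() takes the
  // on_weak path below, while an unknown-offset access such as
  // jdk.internal.misc.Unsafe::getReference(ref, offset) takes the unknown
  // path and is filtered further by insert_pre_barrier().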
 646   bool need_read_barrier = (((on_weak || on_phantom) && !no_keepalive) ||
 647                             (in_heap && unknown && offset != top && obj != top));
 648 
 649   if (!access.is_oop() || !need_read_barrier) {
 650     return CardTableBarrierSetC2::load_at_resolved(access, val_type);
 651   }
 652 
 653   assert(access.is_parse_access(), "entry not supported at optimization time");
 654 
 655   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 656   GraphKit* kit = parse_access.kit();
 657   Node* load;
 658 
 659   Node* control =  kit->control();
 660   const TypePtr* adr_type = access.addr().type();
 661   MemNode::MemOrd mo = access.mem_node_mo();
 662   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 663   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 664   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 665   // Pinned control dependency is the strictest. So it's ok to substitute it for any other.
 666   load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
 667       LoadNode::Pinned, requires_atomic_access, unaligned, mismatched, unsafe,
 668       access.barrier_data());
 669 
 670 
 671   if (on_weak || on_phantom) {
 672     // Use the pre-barrier to record the value in the referent field
 673     pre_barrier(kit, false /* do_load */,
 674                 kit->control(),
 675                 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
 676                 load /* pre_val */, T_OBJECT);
 677     // Add memory barrier to prevent commoning reads from this field
 678     // across safepoint since GC can change its value.
 679     kit->insert_mem_bar(Op_MemBarCPUOrder);
 680   } else if (unknown) {
 681     // We do not require a mem bar inside pre_barrier if need_mem_bar
 682     // is set: the barriers would be emitted by us.
 683     insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
 684   }
 685 
 686   return load;
 687 }
 688 
 689 bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
 690   if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
 691     return true;
 692   }
 693   if (node->Opcode() != Op_CallLeaf) {
 694     return false;
 695   }
 696   CallLeafNode *call = node->as_CallLeaf();
 697   if (call->_name == nullptr) {
 698     return false;
 699   }
 700 
 701   return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
 702 }
 703 
 704 bool G1BarrierSetC2::is_g1_pre_val_load(Node* n) {
 705   if (n->is_Load() && n->as_Load()->has_pinned_control_dependency()) {
 706     // Make sure the only users of it are: CmpP, StoreP, and a call to write_ref_field_pre_entry
 707 
 708     // Skip possible decode
 709     if (n->outcnt() == 1 && n->unique_out()->is_DecodeN()) {
 710       n = n->unique_out();
 711     }
 712 
 713     if (n->outcnt() == 3) {
 714       int found = 0;
 715       for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
 716         Node* use = iter.get();
 717         if (use->is_Cmp() || use->is_Store()) {
 718           ++found;
 719         } else if (use->is_CallLeaf()) {
 720           CallLeafNode* call = use->as_CallLeaf();
 721           if (strcmp(call->_name, "write_ref_field_pre_entry") == 0) {
 722             ++found;
 723           }
 724         }
 725       }
 726       if (found == 3) {
 727         return true;
 728       }
 729     }
 730   }
 731   return false;
 732 }
 733 
 734 bool G1BarrierSetC2::is_gc_pre_barrier_node(Node *node) const {
 735   return is_g1_pre_val_load(node);
 736 }
 737 
 738 void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
 739   if (is_g1_pre_val_load(node)) {
 740     macro->replace_node(node, macro->zerocon(node->as_Load()->bottom_type()->basic_type()));
 741   } else {
 742     assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
 743     assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
    // There could be only one user, the URShift node, in the Object.clone()
    // intrinsic, but the new allocation is passed to the arraycopy stub and
    // cannot be scalar replaced, so we don't check that case.

    // Another case with only one user (the Xor node) is when the null check
    // on the value in the G1 post barrier is folded after CCP, so the code
    // which used the URShift node is removed.
 751 
    // Record the Region node before eliminating the post barrier, since the
    // elimination also removes the CastP2X node when it has only one user.
 754     Node* this_region = node->in(0);
 755     assert(this_region != nullptr, "");
 756 
 757     // Remove G1 post barrier.
 758 
    // Search for the CastP2X->Xor->URShift->Cmp path which checks whether
    // the store is done to a region different from the value's region, and
    // replace the Cmp with #0 (false) to collapse the G1 post barrier.
 762     Node* xorx = node->find_out_with(Op_XorX);
 763     if (xorx != nullptr) {
 764       Node* shift = xorx->unique_out();
 765       Node* cmpx = shift->unique_out();
 766       assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
 767           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
 768           "missing region check in G1 post barrier");
 769       macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
 770 
 771       // Remove G1 pre barrier.
 772 
      // Search for the "if (marking != 0)" check and set it to "false".
      // There is no G1 pre barrier if the previously stored value is null
      // (for example, after initialization).
 776       if (this_region->is_Region() && this_region->req() == 3) {
 777         int ind = 1;
 778         if (!this_region->in(ind)->is_IfFalse()) {
 779           ind = 2;
 780         }
 781         if (this_region->in(ind)->is_IfFalse() &&
 782             this_region->in(ind)->in(0)->Opcode() == Op_If) {
 783           Node* bol = this_region->in(ind)->in(0)->in(1);
 784           assert(bol->is_Bool(), "");
 785           cmpx = bol->in(1);
 786           if (bol->as_Bool()->_test._test == BoolTest::ne &&
 787               cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
 788               cmpx->in(1)->is_Load()) {
 789             Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
 790             const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
 791             if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
 792                 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
 793                 adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
 794               macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
 795             }
 796           }
 797         }
 798       }
 799     } else {
 800       assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
 801       // This is a G1 post barrier emitted by the Object.clone() intrinsic.
 802       // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
 803       // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
 804       Node* shift = node->find_out_with(Op_URShiftX);
 805       assert(shift != nullptr, "missing G1 post barrier");
 806       Node* addp = shift->unique_out();
 807       Node* load = addp->find_out_with(Op_LoadB);
 808       assert(load != nullptr, "missing G1 post barrier");
 809       Node* cmpx = load->unique_out();
 810       assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
 811           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
 812           "missing card value check in G1 post barrier");
 813       macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
 814       // There is no G1 pre barrier in this case
 815     }
    // Now the CastP2X can be removed since it is only used on a dead path
    // which is currently still alive until IGVN optimizes it.
 818     assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
 819     macro->replace_node(node, macro->top());
 820   }
 821 }
 822 
 823 Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
 824   if (!use_ReduceInitialCardMarks() &&
 825       c != nullptr && c->is_Region() && c->req() == 3) {
 826     for (uint i = 1; i < c->req(); i++) {
 827       if (c->in(i) != nullptr && c->in(i)->is_Region() &&
 828           c->in(i)->req() == 3) {
 829         Node* r = c->in(i);
 830         for (uint j = 1; j < r->req(); j++) {
 831           if (r->in(j) != nullptr && r->in(j)->is_Proj() &&
 832               r->in(j)->in(0) != nullptr &&
 833               r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
 834               r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
 835             Node* call = r->in(j)->in(0);
 836             c = c->in(i == 1 ? 2 : 1);
 837             if (c != nullptr && c->Opcode() != Op_Parm) {
 838               c = c->in(0);
 839               if (c != nullptr) {
 840                 c = c->in(0);
 841                 assert(call->in(0) == nullptr ||
 842                        call->in(0)->in(0) == nullptr ||
 843                        call->in(0)->in(0)->in(0) == nullptr ||
 844                        call->in(0)->in(0)->in(0)->in(0) == nullptr ||
 845                        call->in(0)->in(0)->in(0)->in(0)->in(0) == nullptr ||
 846                        c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
 847                 return c;
 848               }
 849             }
 850           }
 851         }
 852       }
 853     }
 854   }
 855   return c;
 856 }
 857 
 858 #ifdef ASSERT
 859 bool G1BarrierSetC2::has_cas_in_use_chain(Node *n) const {
 860   Unique_Node_List visited;
 861   Node_List worklist;
 862   worklist.push(n);
 863   while (worklist.size() > 0) {
 864     Node* x = worklist.pop();
 865     if (visited.member(x)) {
 866       continue;
 867     } else {
 868       visited.push(x);
 869     }
 870 
 871     if (x->is_LoadStore()) {
 872       int op = x->Opcode();
 873       if (op == Op_CompareAndExchangeP || op == Op_CompareAndExchangeN ||
 874           op == Op_CompareAndSwapP     || op == Op_CompareAndSwapN     ||
 875           op == Op_WeakCompareAndSwapP || op == Op_WeakCompareAndSwapN) {
 876         return true;
 877       }
 878     }
 879     if (!x->is_CFG()) {
 880       for (SimpleDUIterator iter(x); iter.has_next(); iter.next()) {
 881         Node* use = iter.get();
 882         worklist.push(use);
 883       }
 884     }
 885   }
 886   return false;
 887 }
 888 
 889 void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads /*output*/) const {
 890   assert(loads.size() == 0, "Loads list should be empty");
 891   Node* pre_val_if = marking_if->find_out_with(Op_IfTrue)->find_out_with(Op_If);
 892   if (pre_val_if != nullptr) {
 893     Unique_Node_List visited;
 894     Node_List worklist;
 895     Node* pre_val = pre_val_if->in(1)->in(1)->in(1);
 896 
 897     worklist.push(pre_val);
 898     while (worklist.size() > 0) {
 899       Node* x = worklist.pop();
 900       if (visited.member(x)) {
 901         continue;
 902       } else {
 903         visited.push(x);
 904       }
 905 
 906       if (has_cas_in_use_chain(x)) {
 907         loads.clear();
 908         return;
 909       }
 910 
 911       if (x->is_Con()) {
 912         continue;
 913       }
 914       if (x->is_EncodeP() || x->is_DecodeN()) {
 915         worklist.push(x->in(1));
 916         continue;
 917       }
 918       if (x->is_Load() || x->is_LoadStore()) {
 919         assert(x->in(0) != nullptr, "Pre-val load has to have a control");
 920         loads.push(x);
 921         continue;
 922       }
 923       if (x->is_Phi()) {
 924         for (uint i = 1; i < x->req(); i++) {
 925           worklist.push(x->in(i));
 926         }
 927         continue;
 928       }
 929       assert(false, "Pre-val anomaly");
 930     }
 931   }
 932 }
 933 
 934 void G1BarrierSetC2::verify_no_safepoints(Compile* compile, Node* marking_check_if, const Unique_Node_List& loads) const {
 935   if (loads.size() == 0) {
 936     return;
 937   }
 938 
  if (loads.size() == 1) { // Handle the typical situation when there is a single pre-value load
                           // that is dominated by the marking_check_if; that is true when the
                           // barrier itself does the pre-val load.
 942     Node *pre_val = loads.at(0);
 943     if (pre_val->in(0)->in(0) == marking_check_if) { // IfTrue->If
 944       return;
 945     }
 946   }
 947 
 948   // All other cases are when pre-value loads dominate the marking check.
 949   Unique_Node_List controls;
 950   for (uint i = 0; i < loads.size(); i++) {
 951     Node *c = loads.at(i)->in(0);
 952     controls.push(c);
 953   }
 954 
 955   Unique_Node_List visited;
 956   Unique_Node_List safepoints;
 957   Node_List worklist;
 958   uint found = 0;
 959 
 960   worklist.push(marking_check_if);
 961   while (worklist.size() > 0 && found < controls.size()) {
 962     Node* x = worklist.pop();
 963     if (x == nullptr || x == compile->top()) continue;
 964     if (visited.member(x)) {
 965       continue;
 966     } else {
 967       visited.push(x);
 968     }
 969 
 970     if (controls.member(x)) {
 971       found++;
 972     }
 973     if (x->is_Region()) {
 974       for (uint i = 1; i < x->req(); i++) {
 975         worklist.push(x->in(i));
 976       }
 977     } else {
 978       if (!x->is_SafePoint()) {
 979         worklist.push(x->in(0));
 980       } else {
 981         safepoints.push(x);
 982       }
 983     }
 984   }
 985   assert(found == controls.size(), "Pre-barrier structure anomaly or possible safepoint");
 986 }
 987 
 988 void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
 989   if (phase != BarrierSetC2::BeforeCodeGen) {
 990     return;
 991   }
 992   // Verify G1 pre-barriers
 993   const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
 994 
 995   Unique_Node_List visited;
 996   Node_List worklist;
 997   // We're going to walk control flow backwards starting from the Root
 998   worklist.push(compile->root());
 999   while (worklist.size() > 0) {
1000     Node* x = worklist.pop();
1001     if (x == nullptr || x == compile->top()) continue;
1002     if (visited.member(x)) {
1003       continue;
1004     } else {
1005       visited.push(x);
1006     }
1007 
1008     if (x->is_Region()) {
1009       for (uint i = 1; i < x->req(); i++) {
1010         worklist.push(x->in(i));
1011       }
1012     } else {
1013       worklist.push(x->in(0));
1014       // We are looking for the pattern:
1015       //                            /->ThreadLocal
1016       // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
1017       //              \->ConI(0)
1018       // We want to verify that the If and the LoadB have the same control
1019       // See GraphKit::g1_write_barrier_pre()
1020       if (x->is_If()) {
1021         IfNode *iff = x->as_If();
1022         if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
1023           CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
1024           if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
1025               && cmp->in(1)->is_Load()) {
1026             LoadNode* load = cmp->in(1)->as_Load();
1027             if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
1028                 && load->in(2)->in(3)->is_Con()
1029                 && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
1030 
1031               Node* if_ctrl = iff->in(0);
1032               Node* load_ctrl = load->in(0);
1033 
1034               if (if_ctrl != load_ctrl) {
1035                 // Skip possible CProj->NeverBranch in infinite loops
1036                 if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
1037                     && if_ctrl->in(0)->is_NeverBranch()) {
1038                   if_ctrl = if_ctrl->in(0)->in(0);
1039                 }
1040               }
1041               assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");
1042 
1043               Unique_Node_List loads;
1044               verify_pre_load(iff, loads);
1045               verify_no_safepoints(compile, iff, loads);
1046             }
1047           }
1048         }
1049       }
1050     }
1051   }
1052 }
1053 #endif
1054 
1055 bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
1056   if (opcode == Op_StoreP) {
1057     Node* adr = n->in(MemNode::Address);
1058     const Type* adr_type = gvn->type(adr);
    // Pointer stores in G1 barriers look like unsafe accesses.
    // Ignore such stores to be able to scalar replace non-escaping
    // allocations.
1062     if (adr_type->isa_rawptr() && adr->is_AddP()) {
1063       Node* base = conn_graph->get_addp_base(adr);
1064       if (base->Opcode() == Op_LoadP &&
1065           base->in(MemNode::Address)->is_AddP()) {
1066         adr = base->in(MemNode::Address);
1067         Node* tls = conn_graph->get_addp_base(adr);
1068         if (tls->Opcode() == Op_ThreadLocal) {
1069           int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1070           const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
1071           if (offs == buf_offset) {
1072             return true; // G1 pre barrier previous oop value store.
1073           }
1074           if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
1075             return true; // G1 post barrier card address store.
1076           }
1077         }
1078       }
1079     }
1080   }
1081   return false;
1082 }