/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of marking
 * are kept alive; therefore, every reference update needs to record the
 * previous reference stored at the address before writing.
 *
 * If the previous value is null there is no need to save the old value.
 * References that are null are filtered at runtime by the barrier code to
 * avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is null,
 * and to avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 */
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseValues* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  intptr_t size_in_bytes = type2aelembytes(bt);
  Node* mem = kit->memory(adr_idx); // Start searching here.
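
  // Walk back through this slice of the memory graph (bounded to a fixed
  // number of steps), skipping stores that are provably independent of our
  // address, until we either find an intervening store to the same field or
  // reach the initialization of the allocation itself.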
  for (int cnt = 0; cnt < 50; cnt++) {
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // Inscrutable pointer.
      }
      if (st_base == base && st_offset == offset) {
        // We have found a store with the same base and offset as ours.
        break;
      }
      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // Advance through independent store memory.
        }
      }
      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success: the bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // Advance through independent store memory.
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing null, so that no previous
        // store has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }
    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }
  return false;
}

/*
 * G1, similar to any GC with a Young Generation, requires a way to keep track
 * of references from the Old Generation to the Young Generation to make sure
 * all live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the Young Generation,
 * stores where the written reference points into the same region as the
 * object being written to, writes of null, and writes to cards already marked
 * as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it is possible at compile time to prove the
 * object is newly allocated and that no safepoint exists between the allocation
 * and the store. This can be seen as a compile-time version of the
 * above-mentioned Young Generation filter.
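 *
 * For example (an illustrative Java-level sketch, not code from this file):
 *
 *   Foo f = new Foo();   // allocation inlined into the compiled method
 *   f.next = other;      // no safepoint between the allocation and the store
 *
 * Here the store to f.next needs no post-barrier: f is known to be newly
 * allocated, so it is either still in the Young Generation or the slow-path
 * allocation code has taken the compensating barrier steps described below.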
 *
 * In the case of a slow allocation, the allocation code must handle the barrier
 * as part of the allocation if the allocated object is not located in the
 * nursery; this would happen for humongous objects.
 */
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseValues* phase, Node* store_ctrl,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  Node* mem = store_ctrl; // Start search from Store node.
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();
    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  // If we are reading the value of the referent field of a Reference object, we
  // need to record the referent in an SATB log buffer using the pre-barrier
  // mechanism. Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = ((on_weak || on_phantom) && !no_keepalive);
  if (access.is_oop() && need_read_barrier) {
    access.set_barrier_data(G1C2BarrierPre);
  }
  return CardTableBarrierSetC2::load_at_resolved(access, val_type);
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  eliminate_gc_barrier_data(node);
}

void G1BarrierSetC2::eliminate_gc_barrier_data(Node* node) const {
  if (node->is_LoadStore()) {
    LoadStoreNode* loadstore = node->as_LoadStore();
    loadstore->set_barrier_data(0);
  } else if (node->is_Mem()) {
    MemNode* mem = node->as_Mem();
    mem->set_barrier_data(0);
  }
}

static void refine_barrier_by_new_val_type(const Node* n) {
  if (n->Opcode() != Op_StoreP &&
      n->Opcode() != Op_StoreN) {
    return;
  }
  MemNode* store = n->as_Mem();
  const Node* newval = n->in(MemNode::ValueIn);
  assert(newval != nullptr, "");
  const Type* newval_bottom = newval->bottom_type();
  TypePtr::PTR newval_type = newval_bottom->make_ptr()->ptr();
  uint8_t barrier_data = store->barrier_data();
  if (!newval_bottom->isa_oopptr() &&
      !newval_bottom->isa_narrowoop() &&
      newval_type != TypePtr::Null) {
    // newval is neither an OOP nor null, so there is no barrier to refine.
    assert(barrier_data == 0, "non-OOP stores should have no barrier data");
    return;
  }
  if (barrier_data == 0) {
    // No barrier to refine.
    return;
  }
  if (newval_type == TypePtr::Null) {
    // Simply elide post-barrier if writing null.
    barrier_data &= ~G1C2BarrierPost;
    barrier_data &= ~G1C2BarrierPostNotNull;
  } else if (((barrier_data & G1C2BarrierPost) != 0) &&
             newval_type == TypePtr::NotNull) {
    // If the post-barrier has not been elided yet (e.g. due to newval being
    // freshly allocated), mark it as not-null (simplifies barrier tests and
    // compressed OOPs logic).
    barrier_data |= G1C2BarrierPostNotNull;
  }
  store->set_barrier_data(barrier_data);
  return;
}

// Refine (not really expand) G1 barriers by looking at the new value type
// (whether it is necessarily null or necessarily non-null).
bool G1BarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;
  worklist.push(C->root());
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    refine_barrier_by_new_val_type(n);
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        worklist.push(in);
      }
    }
  }
  return false;
}

uint G1BarrierSetC2::estimated_barrier_size(const Node* node) const {
  uint8_t barrier_data = MemNode::barrier_data(node);
  uint nodes = 0;
  if ((barrier_data & G1C2BarrierPre) != 0) {
    // Only consider the fast path for the barrier that is
    // actually inlined into the main code stream.
    // The slow path is laid out separately and does not
    // directly affect performance.
    // It has a cost of 6 (AddP, LoadB, Cmp, Bool, If, IfProj).
    nodes += 6;
  }
  if ((barrier_data & G1C2BarrierPost) != 0) {
    nodes += 60;
  }
  return nodes;
}

bool G1BarrierSetC2::can_initialize_object(const StoreNode* store) const {
  assert(store->Opcode() == Op_StoreP || store->Opcode() == Op_StoreN, "OOP store expected");
  // It is OK to move the store across the object initialization boundary only
  // if it does not have any barrier, or if it has barriers that can be safely
  // elided (because of the compensation steps taken on the allocation slow path
  // when ReduceInitialCardMarks is enabled).
  return (MemNode::barrier_data(store) == 0) || use_ReduceInitialCardMarks();
}

void G1BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  if (ac->is_clone_inst() && !use_ReduceInitialCardMarks()) {
    clone_in_runtime(phase, ac, G1BarrierSetRuntime::clone_addr(), "G1BarrierSetRuntime::clone");
    return;
  }
  BarrierSetC2::clone_at_expansion(phase, ac);
}

Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
  bool need_store_barrier = !(tightly_coupled_alloc && use_ReduceInitialCardMarks()) && (in_heap || anonymous);
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  if (access.is_oop() && need_store_barrier) {
    access.set_barrier_data(get_store_barrier(access));
    if (tightly_coupled_alloc) {
      assert(!use_ReduceInitialCardMarks(),
             "post-barriers are only needed for tightly-coupled initialization stores when ReduceInitialCardMarks is disabled");
      // Pre-barriers are unnecessary for tightly-coupled initialization stores.
      access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
    }
  }
  if (no_keepalive) {
    // No keep-alive means no need for the pre-barrier.
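    // (With AS_NO_KEEPALIVE the caller guarantees that the previous value does
    // not need to be kept alive for SATB marking, so recording it is skipped.)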
    access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* G1BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
}

class G1BarrierSetC2State : public BarrierSetC2State {
private:
  GrowableArray<G1BarrierStubC2*>* _stubs;

public:
  G1BarrierSetC2State(Arena* arena)
    : BarrierSetC2State(arena),
      _stubs(new (arena) GrowableArray<G1BarrierStubC2*>(arena, 8, 0, nullptr)) {}

  GrowableArray<G1BarrierStubC2*>* stubs() {
    return _stubs;
  }

  bool needs_liveness_data(const MachNode* mach) const {
    return G1PreBarrierStubC2::needs_barrier(mach) ||
           G1PostBarrierStubC2::needs_barrier(mach);
  }

  bool needs_livein_data() const {
    return false;
  }
};

static G1BarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<G1BarrierSetC2State*>(Compile::current()->barrier_set_state());
}

G1BarrierStubC2::G1BarrierStubC2(const MachNode* node) : BarrierStubC2(node) {}

G1PreBarrierStubC2::G1PreBarrierStubC2(const MachNode* node) : G1BarrierStubC2(node) {}

bool G1PreBarrierStubC2::needs_barrier(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPre) != 0;
}

G1PreBarrierStubC2* G1PreBarrierStubC2::create(const MachNode* node) {
  G1PreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) G1PreBarrierStubC2(node);
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(stub);
  }
  return stub;
}

void G1PreBarrierStubC2::initialize_registers(Register obj, Register pre_val, Register thread, Register tmp1, Register tmp2) {
  _obj = obj;
  _pre_val = pre_val;
  _thread = thread;
  _tmp1 = tmp1;
  _tmp2 = tmp2;
}

Register G1PreBarrierStubC2::obj() const {
  return _obj;
}

Register G1PreBarrierStubC2::pre_val() const {
  return _pre_val;
}

Register G1PreBarrierStubC2::thread() const {
  return _thread;
}

Register G1PreBarrierStubC2::tmp1() const {
  return _tmp1;
}
Register G1PreBarrierStubC2::tmp2() const {
  return _tmp2;
}

void G1PreBarrierStubC2::emit_code(MacroAssembler& masm) {
  G1BarrierSetAssembler* bs = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  bs->generate_c2_pre_barrier_stub(&masm, this);
}

G1PostBarrierStubC2::G1PostBarrierStubC2(const MachNode* node) : G1BarrierStubC2(node) {}

bool G1PostBarrierStubC2::needs_barrier(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPost) != 0;
}

G1PostBarrierStubC2* G1PostBarrierStubC2::create(const MachNode* node) {
  G1PostBarrierStubC2* const stub = new (Compile::current()->comp_arena()) G1PostBarrierStubC2(node);
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(stub);
  }
  return stub;
}

void G1PostBarrierStubC2::initialize_registers(Register thread, Register tmp1, Register tmp2, Register tmp3) {
  _thread = thread;
  _tmp1 = tmp1;
  _tmp2 = tmp2;
  _tmp3 = tmp3;
}

Register G1PostBarrierStubC2::thread() const {
  return _thread;
}

Register G1PostBarrierStubC2::tmp1() const {
  return _tmp1;
}

Register G1PostBarrierStubC2::tmp2() const {
  return _tmp2;
}

Register G1PostBarrierStubC2::tmp3() const {
  return _tmp3;
}

void G1PostBarrierStubC2::emit_code(MacroAssembler& masm) {
  G1BarrierSetAssembler* bs = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  bs->generate_c2_post_barrier_stub(&masm, this);
}

void* G1BarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) G1BarrierSetC2State(comp_arena);
}

int G1BarrierSetC2::get_store_barrier(C2Access& access) const {
  if (!access.is_parse_access()) {
    // Barrier elision is only supported for parse-time accesses for now.
    return G1C2BarrierPre | G1C2BarrierPost;
  }
  GraphKit* kit = (static_cast<C2ParseAccess&>(access)).kit();
  Node* ctl = kit->control();
  Node* adr = access.addr().node();
  uint adr_idx = kit->C->get_alias_index(access.addr().type());
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

  bool can_remove_pre_barrier = g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, access.type(), adr_idx);

  // We can skip marks on a freshly-allocated object in Eden. Keep this code in
  // sync with CardTableBarrierSet::on_slowpath_allocation_exit. That routine
  // informs GC to take appropriate compensating steps, upon a slow-path
  // allocation, so as to make this card-mark elision safe.
  // The post-barrier can also be removed if null is written. This case is
  // handled by G1BarrierSetC2::expand_barriers, which runs at the end of C2's
  // platform-independent optimizations to exploit stronger type information.
  bool can_remove_post_barrier = use_ReduceInitialCardMarks() &&
    ((access.base() == kit->just_allocated_object(ctl)) ||
     g1_can_remove_post_barrier(kit, &kit->gvn(), ctl, adr));

  int barriers = 0;
  if (!can_remove_pre_barrier) {
    barriers |= G1C2BarrierPre;
  }
  if (!can_remove_post_barrier) {
    barriers |= G1C2BarrierPost;
  }

  return barriers;
}

void G1BarrierSetC2::elide_dominated_barrier(MachNode* mach) const {
  uint8_t barrier_data = mach->barrier_data();
  barrier_data &= ~G1C2BarrierPre;
  if (CardTableBarrierSetC2::use_ReduceInitialCardMarks()) {
    barrier_data &= ~G1C2BarrierPost;
    barrier_data &= ~G1C2BarrierPostNotNull;
  }
  mach->set_barrier_data(barrier_data);
}

void G1BarrierSetC2::analyze_dominating_barriers() const {
  ResourceMark rm;
  PhaseCFG* const cfg = Compile::current()->cfg();

  // Find allocations and memory accesses (stores and atomic operations), and
  // track them in lists.
  Node_List accesses;
  Node_List allocations;
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    const Block* const block = cfg->get_block(i);
    for (uint j = 0; j < block->number_of_nodes(); ++j) {
      Node* const node = block->get_node(j);
      if (node->is_Phi()) {
        if (BarrierSetC2::is_allocation(node)) {
          allocations.push(node);
        }
        continue;
      } else if (!node->is_Mach()) {
        continue;
      }

      MachNode* const mach = node->as_Mach();
      switch (mach->ideal_Opcode()) {
        case Op_StoreP:
        case Op_StoreN:
        case Op_CompareAndExchangeP:
        case Op_CompareAndSwapP:
        case Op_GetAndSetP:
        case Op_CompareAndExchangeN:
        case Op_CompareAndSwapN:
        case Op_GetAndSetN:
          if (mach->barrier_data() != 0) {
            accesses.push(mach);
          }
          break;
        default:
          break;
      }
    }
  }

  // Find dominating allocations for each memory access (store or atomic
  // operation) and elide barriers if there is no safepoint poll in between.
  elide_dominated_barriers(accesses, allocations);
}

void G1BarrierSetC2::late_barrier_analysis() const {
  compute_liveness_at_stubs();
  analyze_dominating_barriers();
}

void G1BarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);
  GrowableArray<G1BarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    stubs->at(i)->emit_code(masm);
  }
  masm.flush();
}

#ifndef PRODUCT
void G1BarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const {
  if ((mach->barrier_data() & G1C2BarrierPre) != 0) {
    st->print("pre ");
  }
  if ((mach->barrier_data() & G1C2BarrierPost) != 0) {
    st->print("post ");
  }
  if ((mach->barrier_data() & G1C2BarrierPostNotNull) != 0) {
    st->print("notnull ");
  }
}
#endif // !PRODUCT