/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive: every reference update needs to log the
 * previously stored reference before the new value is written.
 *
 * If the previous value is null there is no need to save the old value.
 * References that are null are filtered at runtime by the barrier code to
 * avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is null,
 * and to avoid adding the barrier code completely.
 *
 * To do so, the compiler needs to determine that the object in which a field
 * is about to be written is newly allocated, and that no prior store to the
 * same field has happened since the allocation.
 */
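// As a simplified, Java-level illustration of the case targeted here (class
// and field names are made up):
//
//   Foo f = new Foo(); // newly allocated object: all reference fields are null
//   f.x = a;           // first store to 'f.x' since the allocation; the
//                      // overwritten value is provably null, so the
//                      // pre-barrier can be elided
//   f.x = b;           // a prior store to the same field has happened, so the
//                      // pre-barrier must be kept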
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseValues* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  intptr_t size_in_bytes = type2aelembytes(bt);
  Node* mem = kit->memory(adr_idx); // Start searching here.

  for (int cnt = 0; cnt < 50; cnt++) {
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // Inscrutable pointer.
      }
      if (st_base == base && st_offset == offset) {
        // We have found a store with the same base and offset as ours.
        break;
      }
      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // Advance through independent store memory.
        }
      }
      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success: the bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // Advance through independent store memory.
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // 'alloc' is guaranteed to be non-null here thanks to the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization stores null for this field, i.e. that
        // no earlier store has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }
    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }
  return false;
}

/*
 * G1, like any other GC with a Young Generation, requires a way to keep track
 * of references from the Old Generation to the Young Generation to make sure
 * all live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates where the object holding the field is located in the
 * Young Generation, where the written reference points into the same region as
 * the field, where null is being written, or where the card is already marked
 * as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, namely if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the allocation
 * and the store. This can be seen as a compile-time version of the
 * above-mentioned Young Generation filter.
 *
 * In the case of a slow allocation, the allocation code must handle the barrier
 * as part of the allocation if the allocated object is not located in the
 * nursery; this would happen for humongous objects.
 */
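// As a simplified, Java-level illustration (class and field names are made up):
//
//   Foo f = new Foo(); // 'f' is allocated in the young generation
//   f.x = a;           // no safepoint between the allocation and the store, so
//                      // 'f' is still young here and the post-barrier (card
//                      // mark) can be elided
//
// If a safepoint could occur between the allocation and the store, a garbage
// collection might move 'f' out of the young generation in the meantime, and
// the post-barrier has to be kept.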
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseValues* phase, Node* store_ctrl,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  Node* mem = store_ctrl; // Start search from Store node.
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();
    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  // If we are reading the value of the referent field of a Reference object, we
  // need to record the referent in an SATB log buffer using the pre-barrier
  // mechanism. Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = ((on_weak || on_phantom) && !no_keepalive);
  if (access.is_oop() && need_read_barrier) {
    access.set_barrier_data(G1C2BarrierPre);
  }
  return CardTableBarrierSetC2::load_at_resolved(access, val_type);
}

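// With late barrier expansion, barriers only exist as barrier data on the
// access nodes until code emission, so eliminating a barrier amounts to
// clearing that data.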
void G1BarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  eliminate_gc_barrier_data(node);
}

void G1BarrierSetC2::eliminate_gc_barrier_data(Node* node) const {
  if (node->is_LoadStore()) {
    LoadStoreNode* loadstore = node->as_LoadStore();
    loadstore->set_barrier_data(0);
  } else if (node->is_Mem()) {
    MemNode* mem = node->as_Mem();
    mem->set_barrier_data(0);
  }
}

static void refine_barrier_by_new_val_type(const Node* n) {
  if (n->Opcode() != Op_StoreP &&
      n->Opcode() != Op_StoreN) {
    return;
  }
  MemNode* store = n->as_Mem();
  const Node* newval = n->in(MemNode::ValueIn);
  assert(newval != nullptr, "");
  const Type* newval_bottom = newval->bottom_type();
  TypePtr::PTR newval_type = newval_bottom->make_ptr()->ptr();
  uint8_t barrier_data = store->barrier_data();
  if (!newval_bottom->isa_oopptr() &&
      !newval_bottom->isa_narrowoop() &&
      newval_type != TypePtr::Null) {
    // newval is neither an OOP nor null, so there is no barrier to refine.
    assert(barrier_data == 0, "non-OOP stores should have no barrier data");
    return;
  }
  if (barrier_data == 0) {
    // No barrier to refine.
    return;
  }
  if (newval_type == TypePtr::Null) {
    // Simply elide post-barrier if writing null.
    barrier_data &= ~G1C2BarrierPost;
    barrier_data &= ~G1C2BarrierPostNotNull;
  } else if (((barrier_data & G1C2BarrierPost) != 0) &&
             newval_type == TypePtr::NotNull) {
    // If the post-barrier has not been elided yet (e.g. due to newval being
    // freshly allocated), mark it as not-null (simplifies barrier tests and
    // compressed OOPs logic).
    barrier_data |= G1C2BarrierPostNotNull;
  }
  store->set_barrier_data(barrier_data);
  return;
}

// Refine (not really expand) G1 barriers by looking at the new value type
// (whether it is necessarily null or necessarily non-null).
bool G1BarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;
  worklist.push(C->root());
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    refine_barrier_by_new_val_type(n);
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        worklist.push(in);
      }
    }
  }
  return false;
}

uint G1BarrierSetC2::estimated_barrier_size(const Node* node) const {
  uint8_t barrier_data = MemNode::barrier_data(node);
  uint nodes = 0;
  if ((barrier_data & G1C2BarrierPre) != 0) {
    // Only consider the fast path of the barrier, which is actually inlined
    // into the main code stream. The slow path is laid out separately and does
    // not directly affect performance. The fast path has a cost of 6 nodes
    // (AddP, LoadB, Cmp, Bool, If, IfProj).
    nodes += 6;
  }
  if ((barrier_data & G1C2BarrierPost) != 0) {
    // Approximate the number of nodes needed: an if costs 4 nodes (Cmp, Bool,
    // If, If projection); any other (assembly) instruction is approximated
    // with a cost of 1.
    nodes += 4 // base cost for the card write: getting the card table base/offset, address calculation, and the card write itself
             + 6 // same region check: uncompress (new_val) oop, xor, shr, (cmp), jmp
             + 4 // new_val is null check
             + (UseCondCardMark ? 4 : 0); // card not clean check.
  }
  return nodes;
}

bool G1BarrierSetC2::can_initialize_object(const StoreNode* store) const {
  assert(store->Opcode() == Op_StoreP || store->Opcode() == Op_StoreN, "OOP store expected");
  // It is OK to move the store across the object initialization boundary only
  // if it does not have any barrier, or if it has barriers that can be safely
  // elided (because of the compensation steps taken on the allocation slow path
  // when ReduceInitialCardMarks is enabled).
  return (MemNode::barrier_data(store) == 0) || use_ReduceInitialCardMarks();
}

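// When ReduceInitialCardMarks is disabled there are no compensating card marks
// on the allocation slow path, so instance clones are expanded into a call to
// G1BarrierSetRuntime::clone instead of the generic expansion.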
void G1BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  if (ac->is_clone_inst() && !use_ReduceInitialCardMarks()) {
    clone_in_runtime(phase, ac, G1BarrierSetRuntime::clone_addr(), "G1BarrierSetRuntime::clone");
    return;
  }
  BarrierSetC2::clone_at_expansion(phase, ac);
}

Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
  bool need_store_barrier = !(tightly_coupled_alloc && use_ReduceInitialCardMarks()) && (in_heap || anonymous);
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  if (access.is_oop() && need_store_barrier) {
    access.set_barrier_data(get_store_barrier(access));
    if (tightly_coupled_alloc) {
      assert(!use_ReduceInitialCardMarks(),
             "post-barriers are only needed for tightly-coupled initialization stores when ReduceInitialCardMarks is disabled");
      // Pre-barriers are unnecessary for tightly-coupled initialization stores.
      access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
    }
  }
  if (no_keepalive) {
    // No keep-alive means no need for the pre-barrier.
    access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

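// Atomic oop accesses always keep both barriers: the pre-barrier logs the value
// being replaced (for SATB) and the post-barrier covers the value being
// installed.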
Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* G1BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
}

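// Compilation-local barrier set state. It mainly collects the out-of-line
// barrier stubs created during code emission so that emit_stubs() can lay them
// out after the main code stream.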
class G1BarrierSetC2State : public BarrierSetC2State {
private:
  GrowableArray<G1BarrierStubC2*>* _stubs;

public:
  G1BarrierSetC2State(Arena* arena)
    : BarrierSetC2State(arena),
      _stubs(new (arena) GrowableArray<G1BarrierStubC2*>(arena, 8, 0, nullptr)) {}

  GrowableArray<G1BarrierStubC2*>* stubs() {
    return _stubs;
  }

  bool needs_liveness_data(const MachNode* mach) const {
    // Liveness data is only required to compute registers that must be preserved
    // across the runtime call in the pre-barrier stub.
    return G1BarrierStubC2::needs_pre_barrier(mach);
  }

  bool needs_livein_data() const {
    return false;
  }
};

static G1BarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<G1BarrierSetC2State*>(Compile::current()->barrier_set_state());
}

G1BarrierStubC2::G1BarrierStubC2(const MachNode* node) : BarrierStubC2(node) {}

bool G1BarrierStubC2::needs_pre_barrier(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPre) != 0;
}

bool G1BarrierStubC2::needs_post_barrier(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPost) != 0;
}

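// The post-barrier only needs to null-check the new value if the compiler could
// not prove it non-null (see refine_barrier_by_new_val_type()).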
bool G1BarrierStubC2::post_new_val_may_be_null(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPostNotNull) == 0;
}

G1PreBarrierStubC2::G1PreBarrierStubC2(const MachNode* node) : G1BarrierStubC2(node) {}

bool G1PreBarrierStubC2::needs_barrier(const MachNode* node) {
  return needs_pre_barrier(node);
}

G1PreBarrierStubC2* G1PreBarrierStubC2::create(const MachNode* node) {
  G1PreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) G1PreBarrierStubC2(node);
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(stub);
  }
  return stub;
}

void G1PreBarrierStubC2::initialize_registers(Register obj, Register pre_val, Register thread, Register tmp1, Register tmp2) {
  _obj = obj;
  _pre_val = pre_val;
  _thread = thread;
  _tmp1 = tmp1;
  _tmp2 = tmp2;
}

Register G1PreBarrierStubC2::obj() const {
  return _obj;
}

Register G1PreBarrierStubC2::pre_val() const {
  return _pre_val;
}

Register G1PreBarrierStubC2::thread() const {
  return _thread;
}

Register G1PreBarrierStubC2::tmp1() const {
  return _tmp1;
}

Register G1PreBarrierStubC2::tmp2() const {
  return _tmp2;
}

void G1PreBarrierStubC2::emit_code(MacroAssembler& masm) {
  G1BarrierSetAssembler* bs = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  bs->generate_c2_pre_barrier_stub(&masm, this);
}

void* G1BarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) G1BarrierSetC2State(comp_arena);
}

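// Decide at parse time which barriers a store needs and return them as a mask
// of G1C2BarrierPre/G1C2BarrierPost flags.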
int G1BarrierSetC2::get_store_barrier(C2Access& access) const {
  if (!access.is_parse_access()) {
    // Only parse-time accesses support barrier elision for now.
    return G1C2BarrierPre | G1C2BarrierPost;
  }
  GraphKit* kit = (static_cast<C2ParseAccess&>(access)).kit();
  Node* ctl = kit->control();
  Node* adr = access.addr().node();
  uint adr_idx = kit->C->get_alias_index(access.addr().type());
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

  bool can_remove_pre_barrier = g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, access.type(), adr_idx);

  // We can skip marks on a freshly-allocated object in Eden. Keep this code in
  // sync with CardTableBarrierSet::on_slowpath_allocation_exit. That routine
  // informs GC to take appropriate compensating steps, upon a slow-path
  // allocation, so as to make this card-mark elision safe.
  // The post-barrier can also be removed if null is written. This case is
  // handled by G1BarrierSetC2::expand_barriers, which runs at the end of C2's
  // platform-independent optimizations to exploit stronger type information.
  bool can_remove_post_barrier = use_ReduceInitialCardMarks() &&
    ((access.base() == kit->just_allocated_object(ctl)) ||
     g1_can_remove_post_barrier(kit, &kit->gvn(), ctl, adr));

  int barriers = 0;
  if (!can_remove_pre_barrier) {
    barriers |= G1C2BarrierPre;
  }
  if (!can_remove_post_barrier) {
    barriers |= G1C2BarrierPost;
  }

  return barriers;
}

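// Called for accesses whose base is a dominating allocation with no safepoint
// poll in between: the previous field value is then known to be null, so the
// pre-barrier can always be dropped; the post-barrier can only be dropped if
// ReduceInitialCardMarks guarantees compensating card marks on the allocation
// slow path.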
void G1BarrierSetC2::elide_dominated_barrier(MachNode* mach) const {
  uint8_t barrier_data = mach->barrier_data();
  barrier_data &= ~G1C2BarrierPre;
  if (CardTableBarrierSetC2::use_ReduceInitialCardMarks()) {
    barrier_data &= ~G1C2BarrierPost;
    barrier_data &= ~G1C2BarrierPostNotNull;
  }
  mach->set_barrier_data(barrier_data);
}

void G1BarrierSetC2::analyze_dominating_barriers() const {
  ResourceMark rm;
  PhaseCFG* const cfg = Compile::current()->cfg();

  // Find allocations and memory accesses (stores and atomic operations), and
  // track them in lists.
  Node_List accesses;
  Node_List allocations;
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    const Block* const block = cfg->get_block(i);
    for (uint j = 0; j < block->number_of_nodes(); ++j) {
      Node* const node = block->get_node(j);
      if (node->is_Phi()) {
        if (BarrierSetC2::is_allocation(node)) {
          allocations.push(node);
        }
        continue;
      } else if (!node->is_Mach()) {
        continue;
      }

      MachNode* const mach = node->as_Mach();
      switch (mach->ideal_Opcode()) {
        case Op_StoreP:
        case Op_StoreN:
        case Op_CompareAndExchangeP:
        case Op_CompareAndSwapP:
        case Op_GetAndSetP:
        case Op_CompareAndExchangeN:
        case Op_CompareAndSwapN:
        case Op_GetAndSetN:
          if (mach->barrier_data() != 0) {
            accesses.push(mach);
          }
          break;
        default:
          break;
      }
    }
  }

  // Find dominating allocations for each memory access (store or atomic
  // operation) and elide barriers if there is no safepoint poll in between.
  elide_dominated_barriers(accesses, allocations);
}

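// Late barrier analysis, run on the final (Mach) graph: compute the liveness
// information needed by the barrier stubs and elide barriers made redundant by
// a dominating allocation.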
void G1BarrierSetC2::late_barrier_analysis() const {
  compute_liveness_at_stubs();
  analyze_dominating_barriers();
}

void G1BarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);
  GrowableArray<G1BarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer.
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    stubs->at(i)->emit_code(masm);
  }
  masm.flush();
}

#ifndef PRODUCT
void G1BarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const {
  if ((mach->barrier_data() & G1C2BarrierPre) != 0) {
    st->print("pre ");
  }
  if ((mach->barrier_data() & G1C2BarrierPost) != 0) {
    st->print("post ");
  }
  if ((mach->barrier_data() & G1C2BarrierPostNotNull) != 0) {
    st->print("notnull ");
  }
}
#endif // !PRODUCT