/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of marking
 * are kept alive: every reference update needs to log the previous value
 * of the field before it is overwritten.
 *
 * If the previous value is null there is no need to save the old value.
 * References that are null are filtered out at runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is null,
 * and to avoid emitting the barrier code completely.
 *
 * For that, the compiler needs to determine that the object in which a field
 * is about to be written is newly allocated, and that no prior store to the
 * same field has happened since the allocation.
 */
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseValues* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  intptr_t size_in_bytes = type2aelembytes(bt);
  Node* mem = kit->memory(adr_idx); // Start searching here.

  for (int cnt = 0; cnt < 50; cnt++) {
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // Inscrutable pointer.
      }
      if (st_base == base && st_offset == offset) {
        // We have found a store with same base and offset as ours.
        break;
      }
      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // Advance through independent store memory.
        }
      }
      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success: the bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // Advance through independent store memory.
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization stores null, i.e. that no earlier store
        // has been captured by the initialization and directly writes a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }
    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }
  return false;
}
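
// Illustrative example (not part of the original code): the kind of Java-level
// pattern for which the pre-barrier can typically be removed, assuming a
// hypothetical class Foo with an object field 'bar':
//
//   Foo f = new Foo(); // allocation; object fields start out null
//   f.bar = x;         // the overwritten value of f.bar is provably null,
//                      // so no SATB pre-barrier entry is needed
//
// If a call or any other inscrutable memory state appears between the
// allocation and the store, the search above bails out and the pre-barrier is
// kept.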

/*
 * G1, like any GC with a Young Generation, needs a way to keep track of
 * references from the Old Generation to the Young Generation to make sure all
 * live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates when the field being written is in an object located in
 * the Young Generation, when the new value points into the same region as the
 * object being written to, when null is being written, or when the card is
 * already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely: if it can be proven at compile time that the object
 * is newly allocated and that no safepoint exists between the allocation and
 * the store. This can be seen as a compile-time version of the above-mentioned
 * Young Generation filter.
 *
 * In the case of a slow allocation, the allocation code must handle the barrier
 * as part of the allocation if the allocated object is not located in the
 * nursery; this would happen for humongous objects.
 */
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseValues* phase, Node* store_ctrl,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  Node* mem = store_ctrl; // Start search from Store node.
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();
    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}
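
// Illustrative example (not part of the original code): a store into a freshly
// allocated object, using a hypothetical class Foo with field 'bar':
//
//   Foo f = new Foo(); // freshly allocated, expected to be in the nursery
//   f.bar = x;         // cannot create an old-to-young reference, so the
//                      // card mark can be skipped
//
// This elision relies on ReduceInitialCardMarks: if the allocation takes the
// slow path and the object ends up outside the nursery (e.g. a humongous
// object), the runtime performs the compensating card marks instead (see
// CardTableBarrierSet::on_slowpath_allocation_exit).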

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  // If we are reading the value of the referent field of a Reference object, we
  // need to record the referent in an SATB log buffer using the pre-barrier
  // mechanism. Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints, since GC can change its value.
  bool need_read_barrier = ((on_weak || on_phantom) && !no_keepalive);
  if (access.is_oop() && need_read_barrier) {
    access.set_barrier_data(G1C2BarrierPre);
  }
  return CardTableBarrierSetC2::load_at_resolved(access, val_type);
}
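
// Illustrative example (not part of the original code): the case handled above
// is a load through the referent field of a Reference object, e.g.
//
//   WeakReference<Foo> ref = ...;
//   Foo foo = ref.get(); // the loaded referent is recorded via the SATB
//                        // pre-barrier so that concurrent marking keeps it
//                        // alive even if it is otherwise only weakly reachable
//
// Loads decorated with AS_NO_KEEPALIVE deliberately skip this barrier.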

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  eliminate_gc_barrier_data(node);
}

void G1BarrierSetC2::eliminate_gc_barrier_data(Node* node) const {
  if (node->is_LoadStore()) {
    LoadStoreNode* loadstore = node->as_LoadStore();
    loadstore->set_barrier_data(0);
  } else if (node->is_Mem()) {
    MemNode* mem = node->as_Mem();
    mem->set_barrier_data(0);
  }
}

static void refine_barrier_by_new_val_type(const Node* n) {
  if (n->Opcode() != Op_StoreP &&
      n->Opcode() != Op_StoreN) {
    return;
  }
  MemNode* store = n->as_Mem();
  const Node* newval = n->in(MemNode::ValueIn);
  assert(newval != nullptr, "");
  const Type* newval_bottom = newval->bottom_type();
  TypePtr::PTR newval_type = newval_bottom->make_ptr()->ptr();
  uint8_t barrier_data = store->barrier_data();
  if (!newval_bottom->isa_oopptr() &&
      !newval_bottom->isa_narrowoop() &&
      newval_type != TypePtr::Null) {
    // newval is neither an OOP nor null, so there is no barrier to refine.
    assert(barrier_data == 0, "non-OOP stores should have no barrier data");
    return;
  }
  if (barrier_data == 0) {
    // No barrier to refine.
    return;
  }
  if (newval_type == TypePtr::Null) {
    // Simply elide post-barrier if writing null.
    barrier_data &= ~G1C2BarrierPost;
    barrier_data &= ~G1C2BarrierPostNotNull;
  } else if (((barrier_data & G1C2BarrierPost) != 0) &&
             newval_type == TypePtr::NotNull) {
    // If the post-barrier has not been elided yet (e.g. due to newval being
    // freshly allocated), mark it as not-null (simplifies barrier tests and
    // compressed OOPs logic).
    barrier_data |= G1C2BarrierPostNotNull;
  }
  store->set_barrier_data(barrier_data);
  return;
}

// Refine (not really expand) G1 barriers by looking at the new value type
// (whether it is necessarily null or necessarily non-null).
bool G1BarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;
  worklist.push(C->root());
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    refine_barrier_by_new_val_type(n);
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        worklist.push(in);
      }
    }
  }
  return false;
}
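
// Illustrative example (not part of the original code) of the refinement done
// above, using a hypothetical field 'f.bar':
//
//   f.bar = null;      // new value is provably null: the post-barrier is
//                      // elided, since storing null cannot create a
//                      // cross-region reference
//   f.bar = new Bar(); // new value is provably not-null: the barrier is kept
//                      // but marked G1C2BarrierPostNotNull, so its runtime
//                      // null check can be omitted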

uint G1BarrierSetC2::estimated_barrier_size(const Node* node) const {
  uint8_t barrier_data = MemNode::barrier_data(node);
  uint nodes = 0;
  if ((barrier_data & G1C2BarrierPre) != 0) {
    // Only consider the fast path of the barrier, i.e. the part that is
    // actually inlined into the main code stream. The slow path is laid out
    // separately and does not directly affect performance. The fast path has
    // a cost of 6 nodes (AddP, LoadB, Cmp, Bool, If, IfProj).
    nodes += 6;
  }
  if ((barrier_data & G1C2BarrierPost) != 0) {
    // Approximate the number of nodes needed: an 'if' costs 4 nodes (Cmp, Bool,
    // If, If projection), any other (assembly) instruction is approximated with
    // a cost of 1.
    nodes += 4 // Base cost of the card write: card table base/offset, address calculation, and the write itself.
             + 6 // Same-region check: uncompress (new_val) oop, xor, shr, (cmp), jmp.
             + 4 // Null check of new_val.
             + (UseCondCardMark ? 4 : 0); // Check whether the card is already dirty.
  }
  return nodes;
}
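
// Rough sketch (not part of the original code) of the inlined post-barrier
// fast path that the node counts above approximate; the exact shape is
// platform-dependent and this pseudo-code is illustrative only:
//
//   tmp = store_addr XOR new_val
//   if ((tmp >> G1HeapRegion::LogOfHRGrainBytes) == 0) goto done // same region
//   if (new_val == null) goto done                               // null store
//   card = card_table_base + (store_addr >> card_shift)
//   if (UseCondCardMark && *card is not clean) goto done         // already dirty
//   *card = dirty
//   done: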

bool G1BarrierSetC2::can_initialize_object(const StoreNode* store) const {
  assert(store->Opcode() == Op_StoreP || store->Opcode() == Op_StoreN, "OOP store expected");
  // It is OK to move the store across the object initialization boundary only
  // if it does not have any barrier, or if it has barriers that can be safely
  // elided (because of the compensation steps taken on the allocation slow path
  // when ReduceInitialCardMarks is enabled).
  return (MemNode::barrier_data(store) == 0) || use_ReduceInitialCardMarks();
}

void G1BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  if (ac->is_clone_inst() && !use_ReduceInitialCardMarks()) {
    clone_in_runtime(phase, ac, G1BarrierSetRuntime::clone_addr(), "G1BarrierSetRuntime::clone");
    return;
  }
  BarrierSetC2::clone_at_expansion(phase, ac);
}

Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
  bool need_store_barrier = !(tightly_coupled_alloc && use_ReduceInitialCardMarks()) && (in_heap || anonymous);
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  if (access.is_oop() && need_store_barrier) {
    access.set_barrier_data(get_store_barrier(access));
    if (tightly_coupled_alloc) {
      assert(!use_ReduceInitialCardMarks(),
             "post-barriers are only needed for tightly-coupled initialization stores when ReduceInitialCardMarks is disabled");
      // Pre-barriers are unnecessary for tightly-coupled initialization stores.
      access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
    }
  }
  if (no_keepalive) {
    // No keep-alive means no need for the pre-barrier.
    access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
  }
  return BarrierSetC2::store_at_resolved(access, val);
}
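
// Illustrative notes (not part of the original code) on the decorator cases
// handled above:
// - C2_TIGHTLY_COUPLED_ALLOC: initialization stores that immediately follow the
//   allocation they write to. With ReduceInitialCardMarks they need no barriers
//   at all; without it they only keep the post-barrier, since the overwritten
//   field values are known to be null and need no SATB logging.
// - AS_NO_KEEPALIVE: stores that must not keep the previous value alive drop
//   the pre-barrier as well.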

Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* G1BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (!access.is_oop()) {
    return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
  }
  access.set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
  return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
}

class G1BarrierSetC2State : public BarrierSetC2State {
private:
  GrowableArray<G1BarrierStubC2*>* _stubs;

public:
  G1BarrierSetC2State(Arena* arena)
    : BarrierSetC2State(arena),
      _stubs(new (arena) GrowableArray<G1BarrierStubC2*>(arena, 8, 0, nullptr)) {}

  GrowableArray<G1BarrierStubC2*>* stubs() {
    return _stubs;
  }

  bool needs_liveness_data(const MachNode* mach) const {
    // Liveness data is only required to compute registers that must be preserved
    // across the runtime call in the pre-barrier stub.
    return G1BarrierStubC2::needs_pre_barrier(mach);
  }

  bool needs_livein_data() const {
    return false;
  }
};

static G1BarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<G1BarrierSetC2State*>(Compile::current()->barrier_set_state());
}

G1BarrierStubC2::G1BarrierStubC2(const MachNode* node) : BarrierStubC2(node) {}

bool G1BarrierStubC2::needs_pre_barrier(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPre) != 0;
}

bool G1BarrierStubC2::needs_post_barrier(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPost) != 0;
}

bool G1BarrierStubC2::post_new_val_may_be_null(const MachNode* node) {
  return (node->barrier_data() & G1C2BarrierPostNotNull) == 0;
}

G1PreBarrierStubC2::G1PreBarrierStubC2(const MachNode* node) : G1BarrierStubC2(node) {}

bool G1PreBarrierStubC2::needs_barrier(const MachNode* node) {
  return needs_pre_barrier(node);
}

G1PreBarrierStubC2* G1PreBarrierStubC2::create(const MachNode* node) {
  G1PreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) G1PreBarrierStubC2(node);
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(stub);
  }
  return stub;
}

void G1PreBarrierStubC2::initialize_registers(Register obj, Register pre_val, Register thread, Register tmp1, Register tmp2) {
  _obj = obj;
  _pre_val = pre_val;
  _thread = thread;
  _tmp1 = tmp1;
  _tmp2 = tmp2;
}

Register G1PreBarrierStubC2::obj() const {
  return _obj;
}

Register G1PreBarrierStubC2::pre_val() const {
  return _pre_val;
}

Register G1PreBarrierStubC2::thread() const {
  return _thread;
}

Register G1PreBarrierStubC2::tmp1() const {
  return _tmp1;
}

Register G1PreBarrierStubC2::tmp2() const {
  return _tmp2;
}

void G1PreBarrierStubC2::emit_code(MacroAssembler& masm) {
  G1BarrierSetAssembler* bs = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  bs->generate_c2_pre_barrier_stub(&masm, this);
}

void* G1BarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) G1BarrierSetC2State(comp_arena);
}

int G1BarrierSetC2::get_store_barrier(C2Access& access) const {
  if (!access.is_parse_access()) {
    // Barrier elision is only supported at parse time for now.
    return G1C2BarrierPre | G1C2BarrierPost;
  }
  GraphKit* kit = (static_cast<C2ParseAccess&>(access)).kit();
  Node* ctl = kit->control();
  Node* adr = access.addr().node();
  uint adr_idx = kit->C->get_alias_index(access.addr().type());
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

  bool can_remove_pre_barrier = g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, access.type(), adr_idx);

  // We can skip marks on a freshly-allocated object in Eden. Keep this code in
  // sync with CardTableBarrierSet::on_slowpath_allocation_exit. That routine
  // informs GC to take appropriate compensating steps, upon a slow-path
  // allocation, so as to make this card-mark elision safe.
  // The post-barrier can also be removed if null is written. This case is
  // handled by G1BarrierSetC2::expand_barriers, which runs at the end of C2's
  // platform-independent optimizations to exploit stronger type information.
  bool can_remove_post_barrier = use_ReduceInitialCardMarks() &&
    ((access.base() == kit->just_allocated_object(ctl)) ||
     g1_can_remove_post_barrier(kit, &kit->gvn(), ctl, adr));

  int barriers = 0;
  if (!can_remove_pre_barrier) {
    barriers |= G1C2BarrierPre;
  }
  if (!can_remove_post_barrier) {
    barriers |= G1C2BarrierPost;
  }

  return barriers;
}

void G1BarrierSetC2::elide_dominated_barrier(MachNode* mach) const {
  uint8_t barrier_data = mach->barrier_data();
  barrier_data &= ~G1C2BarrierPre;
  if (CardTableBarrierSetC2::use_ReduceInitialCardMarks()) {
    barrier_data &= ~G1C2BarrierPost;
    barrier_data &= ~G1C2BarrierPostNotNull;
  }
  mach->set_barrier_data(barrier_data);
}
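
// Illustrative example (not part of the original code): a store whose base
// object allocation dominates it with no safepoint poll in between, using a
// hypothetical class Foo:
//
//   Foo f = new Foo();
//   f.bar = x; // dominated by the allocation, no safepoint in between
//
// For such stores the SATB pre-barrier is dropped, and the post-barrier is
// dropped as well when ReduceInitialCardMarks is enabled, because the
// allocation slow path then performs the compensating card marks.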

void G1BarrierSetC2::analyze_dominating_barriers() const {
  ResourceMark rm;
  PhaseCFG* const cfg = Compile::current()->cfg();

  // Find allocations and memory accesses (stores and atomic operations), and
  // track them in lists.
  Node_List accesses;
  Node_List allocations;
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    const Block* const block = cfg->get_block(i);
    for (uint j = 0; j < block->number_of_nodes(); ++j) {
      Node* const node = block->get_node(j);
      if (node->is_Phi()) {
        if (BarrierSetC2::is_allocation(node)) {
          allocations.push(node);
        }
        continue;
      } else if (!node->is_Mach()) {
        continue;
      }

      MachNode* const mach = node->as_Mach();
      switch (mach->ideal_Opcode()) {
        case Op_StoreP:
        case Op_StoreN:
        case Op_CompareAndExchangeP:
        case Op_CompareAndSwapP:
        case Op_GetAndSetP:
        case Op_CompareAndExchangeN:
        case Op_CompareAndSwapN:
        case Op_GetAndSetN:
          if (mach->barrier_data() != 0) {
            accesses.push(mach);
          }
          break;
        default:
          break;
      }
    }
  }

  // Find dominating allocations for each memory access (store or atomic
  // operation) and elide barriers if there is no safepoint poll in between.
  elide_dominated_barriers(accesses, allocations);
}

void G1BarrierSetC2::late_barrier_analysis() const {
  compute_liveness_at_stubs();
  analyze_dominating_barriers();
}

void G1BarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);
  GrowableArray<G1BarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    stubs->at(i)->emit_code(masm);
  }
  masm.flush();
}

#ifndef PRODUCT
void G1BarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const {
  if ((mach->barrier_data() & G1C2BarrierPre) != 0) {
    st->print("pre ");
  }
  if ((mach->barrier_data() & G1C2BarrierPost) != 0) {
    st->print("post ");
  }
  if ((mach->barrier_data() & G1C2BarrierPostNotNull) != 0) {
    st->print("notnull ");
  }
}
#endif // !PRODUCT