/*
 * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/output.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

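// Accessor for the Shenandoah-specific BarrierSetC2 instance installed in the
// global barrier set.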
ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena) :
    BarrierSetC2State(comp_arena),
    _stubs(new (comp_arena) GrowableArray<ShenandoahBarrierStubC2*>(comp_arena, 8, 0, nullptr)),
    _stubs_start_offset(0) {
}

#define __ kit->

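// Returns true if the SATB pre-barrier for a store at adr can be elided: walk
// back through the memory graph and try to prove that the field being written
// still holds its initial null value from the enclosing allocation, in which
// case there is no previous value to log.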
static bool satb_can_remove_pre_barrier(GraphKit* kit, PhaseValues* phase, Node* adr,
                                        BasicType bt, uint adr_idx) {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is known to be non-null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization still stores null, i.e. that no earlier
        // store has been moved up to write a reference into the field.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

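// Returns true if the card-mark post-barrier can be elided because the store
// initializes the object that was just allocated: the store's memory state is
// the projection of the allocation's InitializeNode.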
static bool shenandoah_can_remove_post_barrier(GraphKit* kit, PhaseValues* phase, Node* store_ctrl, Node* adr) {
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // Cannot unalias unless there are precise offsets.
  }
  if (alloc == nullptr) {
    return false; // No allocation found.
  }

  Node* mem = store_ctrl;   // Start search from Store node.
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode*  st_alloc = st_init->allocation();
    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

bool ShenandoahBarrierSetC2::is_shenandoah_clone_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::clone_barrier);
}

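// Type signature of the clone barrier runtime call: a single oop argument
// (the source object) and no return value.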
const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

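// Decide which barriers a store needs: an SATB pre-barrier unless the field
// provably still holds null, and a card-mark post-barrier unless the store
// targets a freshly allocated object.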
static uint8_t get_store_barrier(C2Access& access) {
  if (!access.is_parse_access()) {
    // Only support for eliding barriers at parse time for now.
    return ShenandoahBarrierSATB | ShenandoahBarrierCardMark;
  }
  GraphKit* kit = (static_cast<C2ParseAccess&>(access)).kit();
  Node* ctl = kit->control();
  Node* adr = access.addr().node();
  uint adr_idx = kit->C->get_alias_index(access.addr().type());
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

  bool can_remove_pre_barrier = satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, access.type(), adr_idx);

  // We can skip marks on a freshly-allocated object in Eden. Keep this code in
  // sync with CardTableBarrierSet::on_slowpath_allocation_exit. That routine
  // informs GC to take appropriate compensating steps, upon a slow-path
  // allocation, so as to make this card-mark elision safe.
  // The post-barrier can also be removed if null is written. This case is
  // handled by ShenandoahBarrierSetC2::expand_barriers, which runs at the end of C2's
  // platform-independent optimizations to exploit stronger type information.
  bool can_remove_post_barrier = ReduceInitialCardMarks &&
    ((access.base() == kit->just_allocated_object(ctl)) ||
     shenandoah_can_remove_post_barrier(kit, &kit->gvn(), ctl, adr));

  uint8_t barriers = 0;
  if (!can_remove_pre_barrier) {
    barriers |= ShenandoahBarrierSATB;
  }
  if (!can_remove_post_barrier) {
    barriers |= ShenandoahBarrierCardMark;
  }

  return barriers;
}

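// Install barrier data on oop stores. Tightly coupled initialization stores
// and AS_NO_KEEPALIVE stores never need the SATB pre-barrier.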
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
  bool need_store_barrier = !(tightly_coupled_alloc && ReduceInitialCardMarks) && (in_heap || anonymous);
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  if (access.is_oop() && need_store_barrier) {
    access.set_barrier_data(get_store_barrier(access));
    if (tightly_coupled_alloc) {
      assert(!ReduceInitialCardMarks,
             "post-barriers are only needed for tightly-coupled initialization stores when ReduceInitialCardMarks is disabled");
      // Pre-barriers are unnecessary for tightly-coupled initialization stores.
      access.set_barrier_data(access.barrier_data() & ~ShenandoahBarrierSATB);
    }
  }
  if (no_keepalive) {
    // No keep-alive means no need for the pre-barrier.
    access.set_barrier_data(access.barrier_data() & ~ShenandoahBarrierSATB);
  }
  return BarrierSetC2::store_at_resolved(access, val);
}

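// Attach load-reference-barrier data to an oop access: the reference strength
// (strong, weak or phantom) plus whether the access is to native (off-heap)
// memory. Tightly coupled allocations need no barrier at all.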
static void set_barrier_data(C2Access& access) {
  if (!access.is_oop()) {
    return;
  }

  if (access.decorators() & C2_TIGHTLY_COUPLED_ALLOC) {
    access.set_barrier_data(ShenandoahBarrierElided);
    return;
  }

  uint8_t barrier_data = 0;

  if (access.decorators() & ON_PHANTOM_OOP_REF) {
    barrier_data |= ShenandoahBarrierPhantom;
  } else if (access.decorators() & ON_WEAK_OOP_REF) {
    barrier_data |= ShenandoahBarrierWeak;
  } else {
    barrier_data |= ShenandoahBarrierStrong;
  }

  if (access.decorators() & IN_NATIVE) {
    barrier_data |= ShenandoahBarrierNative;
  }

  access.set_barrier_data(barrier_data);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  // 1. Non-reference load: no additional barrier is needed.
  if (!access.is_oop()) {
    return BarrierSetC2::load_at_resolved(access, val_type);
  }

  // 2. Set barrier data for the load-reference barrier.
  set_barrier_data(access);

  // 3. If we are reading the value of the referent field of a Reference object,
  // we need to record the referent in an SATB log buffer using the pre-barrier
  // mechanism. We also need a memory barrier to prevent commoning reads from
  // this field across safepoints, since GC can change its value.
  DecoratorSet decorators = access.decorators();
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  uint8_t barriers = access.barrier_data();
  bool need_read_barrier = (on_weak || on_phantom) && !no_keepalive;
  if (need_read_barrier) {
    barriers |= ShenandoahBarrierSATB;
  }
  access.set_barrier_data(barriers);

  return BarrierSetC2::load_at_resolved(access, val_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  if (ShenandoahCASBarrier) {
    set_barrier_data(access);
  }

  if (access.is_oop()) {
    access.set_barrier_data(access.barrier_data() | ShenandoahBarrierSATB | ShenandoahBarrierCardMark);
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  if (ShenandoahCASBarrier) {
    set_barrier_data(access);
  }
  if (access.is_oop()) {
    access.set_barrier_data(access.barrier_data() | ShenandoahBarrierSATB | ShenandoahBarrierCardMark);
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  if (access.is_oop()) {
    access.set_barrier_data(ShenandoahBarrierStrong | ShenandoahBarrierSATB | ShenandoahBarrierCardMark);
  }
  return BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
}

bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  return is_shenandoah_clone_call(node);
}

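// Refine the barrier data of a store based on the type of the new value:
// storing null never needs a card mark, and storing a provably non-null value
// lets the barrier skip its null check.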
static void refine_barrier_by_new_val_type(const Node* n) {
  if (n->Opcode() != Op_StoreP && n->Opcode() != Op_StoreN) {
    return;
  }
  MemNode* store = n->as_Mem();
  const Node* newval = n->in(MemNode::ValueIn);
  assert(newval != nullptr, "");
  const Type* newval_bottom = newval->bottom_type();
  TypePtr::PTR newval_type = newval_bottom->make_ptr()->ptr();
  uint8_t barrier_data = store->barrier_data();
  if (!newval_bottom->isa_oopptr() &&
      !newval_bottom->isa_narrowoop() &&
      newval_type != TypePtr::Null) {
    // newval is neither an OOP nor null, so there is no barrier to refine.
    assert(barrier_data == 0, "non-OOP stores should have no barrier data");
    return;
  }
  if (barrier_data == 0) {
    // No barrier to refine.
    return;
  }
  if (newval_type == TypePtr::Null) {
    // Simply elide post-barrier if writing null.
    barrier_data &= ~ShenandoahBarrierCardMark;
    barrier_data &= ~ShenandoahBarrierCardMarkNotNull;
  } else if ((barrier_data & ShenandoahBarrierCardMark) != 0 &&
             newval_type == TypePtr::NotNull) {
    // If the post-barrier has not been elided yet (e.g. due to newval being
    // freshly allocated), mark it as not-null (simplifies barrier tests and
    // compressed OOPs logic).
    barrier_data |= ShenandoahBarrierCardMarkNotNull;
  }
  store->set_barrier_data(barrier_data);
}

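// Runs at the end of C2's platform-independent optimizations: walk the whole
// graph and refine the barrier data of stores using the now-final value types.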
bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;
  worklist.push(C->root());
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    refine_barrier_by_new_val_type(n);
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        worklist.push(in);
      }
    }
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = is_reference_type(type);
  if (!is_oop) {
    return false;
  }
  if (ShenandoahSATBBarrier && tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  return true;
}

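// Returns true if cloning src may copy oop fields and therefore needs the
// clone barrier. Instances of leaf classes without oop fields and arrays of
// primitives can be cloned without a barrier.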
bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
  const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != nullptr) {
    ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
    if ((src_type->klass_is_exact() || !ik->has_subklass()) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          Compile::current()->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem = src_type->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

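// Expand the clone intrinsic. If the heap may contain forwarded objects, call
// the clone barrier runtime stub to fix up the source object first, then
// perform the copy with the fast arraycopy stub.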
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src_base = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest_base = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* src = phase->basic_plus_adr(src_base, src_offset);
  Node* dest = phase->basic_plus_adr(dest_base, dest_offset);

  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
    // Check whether the heap has forwarded objects. If it does, we need to call
    // into a special routine that fixes up source references before we can continue.

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* thread = phase->transform_later(new ThreadLocalNode());
    Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

    uint gc_state_idx = Compile::AliasIdxRaw;
    const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
    DEBUG_ONLY(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

    Node* gc_state    = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
    Node* stable_and  = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)));
    Node* stable_cmp  = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));

    IfNode* stable_iff  = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
    Node* stable_ctrl   = phase->transform_later(new IfFalseNode(stable_iff));
    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));

    // Heap is stable, no need to do anything additional
    region->init_req(_heap_stable, stable_ctrl);
    mem_phi->init_req(_heap_stable, mem);

    // Heap is unstable, call into clone barrier stub
    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
                                       ShenandoahBarrierSetC2::clone_barrier_Type(),
                                       CAST_FROM_FN_PTR(address, ShenandoahRuntime::clone_barrier),
                                       "shenandoah_clone",
                                       TypeRawPtr::BOTTOM,
                                       src_base);
    call = phase->transform_later(call);

    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
    region->init_req(_heap_unstable, ctrl);
    mem_phi->init_req(_heap_unstable, mem);

    // Wire up the actual arraycopy stub now
    ctrl = phase->transform_later(region);
    mem = phase->transform_later(mem_phi);

    const char* name = "arraycopy";
    call = phase->make_leaf_call(ctrl, mem,
                                 OptoRuntime::fast_arraycopy_Type(),
                                 phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, name, true),
                                 name, TypeRawPtr::BOTTOM,
                                 src, dest, length
                                 LP64_ONLY(COMMA phase->top()));
    call = phase->transform_later(call);

    // Hook up the whole thing into the graph
    phase->igvn().replace_node(ac, call);
  } else {
    BarrierSetC2::clone_at_expansion(phase, ac);
  }
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::eliminate_gc_barrier_data(Node* node) const {
  if (node->is_LoadStore()) {
    LoadStoreNode* loadstore = node->as_LoadStore();
    loadstore->set_barrier_data(0);
  } else if (node->is_Mem()) {
    MemNode* mem = node->as_Mem();
    mem->set_barrier_data(0);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  eliminate_gc_barrier_data(node);
}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  // TODO: Re-implement C2 barrier verification.
}
#endif

static ShenandoahBarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

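// Conservatively estimate the total size of the barrier stubs by emitting
// each of them into the scratch buffer and summing the instruction sizes.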
int ShenandoahBarrierSetC2::estimate_stub_size() const {
  Compile* const C = Compile::current();
  BufferBlob* const blob = C->output()->scratch_buffer_blob();
  GrowableArray<ShenandoahBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  int size = 0;

  for (int i = 0; i < stubs->length(); i++) {
    CodeBuffer cb(blob->content_begin(), checked_cast<CodeBuffer::csize_t>((address)C->output()->scratch_locs_memory() - blob->content_begin()));
    MacroAssembler masm(&cb);
    stubs->at(i)->emit_code(masm);
    size += cb.insts_size();
  }

  return size;
}

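// Emit all registered barrier stubs at the end of the instructions section,
// bailing out if the code buffer cannot be expanded.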
void ShenandoahBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);
  GrowableArray<ShenandoahBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  barrier_set_state()->set_stubs_start_offset(masm.offset());

  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    stubs->at(i)->emit_code(masm);
  }

  masm.flush();
}

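// Register a stub for final code emission. Stubs created during scratch
// emission (size estimation) are not registered, so they are not emitted twice.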
void ShenandoahBarrierStubC2::register_stub() {
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(this);
  }
}

ShenandoahLoadRefBarrierStubC2* ShenandoahLoadRefBarrierStubC2::create(const MachNode* node, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
  auto* stub = new (Compile::current()->comp_arena()) ShenandoahLoadRefBarrierStubC2(node, obj, addr, tmp1, tmp2, tmp3, narrow);
  stub->register_stub();
  return stub;
}

ShenandoahSATBBarrierStubC2* ShenandoahSATBBarrierStubC2::create(const MachNode* node, Register addr, Register preval, Register tmp) {
  auto* stub = new (Compile::current()->comp_arena()) ShenandoahSATBBarrierStubC2(node, addr, preval, tmp);
  stub->register_stub();
  return stub;
}

ShenandoahCASBarrierSlowStubC2* ShenandoahCASBarrierSlowStubC2::create(const MachNode* node, Register addr, Register expected, Register new_val, Register result, Register tmp, bool cae, bool acquire, bool release, bool weak) {
  auto* stub = new (Compile::current()->comp_arena()) ShenandoahCASBarrierSlowStubC2(node, addr, Address(), expected, new_val, result, tmp, noreg, cae, acquire, release, weak);
  stub->register_stub();
  return stub;
}

ShenandoahCASBarrierSlowStubC2* ShenandoahCASBarrierSlowStubC2::create(const MachNode* node, Address addr, Register expected, Register new_val, Register result, Register tmp1, Register tmp2, bool cae) {
  auto* stub = new (Compile::current()->comp_arena()) ShenandoahCASBarrierSlowStubC2(node, noreg, addr, expected, new_val, result, tmp1, tmp2, cae, false, false, false);
  stub->register_stub();
  return stub;
}

ShenandoahCASBarrierMidStubC2* ShenandoahCASBarrierMidStubC2::create(const MachNode* node, ShenandoahCASBarrierSlowStubC2* slow_stub, Register expected, Register result, Register tmp, bool cae) {
  auto* stub = new (Compile::current()->comp_arena()) ShenandoahCASBarrierMidStubC2(node, slow_stub, expected, result, tmp, cae);
  stub->register_stub();
  return stub;
}

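// Liveness data is only needed for nodes that carry SATB or load-reference
// barrier stubs.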
bool ShenandoahBarrierSetC2State::needs_liveness_data(const MachNode* mach) const {
  return ShenandoahSATBBarrierStubC2::needs_barrier(mach) || ShenandoahLoadRefBarrierStubC2::needs_barrier(mach);
}

bool ShenandoahBarrierSetC2State::needs_livein_data() const {
  return true;
}