1 /*
  2  * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "classfile/javaClasses.inline.hpp"
 27 #include "gc/shared/barrierSet.hpp"
 28 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"

 29 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"


 30 #include "gc/shenandoah/shenandoahForwarding.hpp"
 31 #include "gc/shenandoah/shenandoahHeap.hpp"
 32 #include "gc/shenandoah/shenandoahRuntime.hpp"
 33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 34 #include "opto/arraycopynode.hpp"
 35 #include "opto/escape.hpp"
 36 #include "opto/graphKit.hpp"
 37 #include "opto/idealKit.hpp"
 38 #include "opto/macro.hpp"

 39 #include "opto/narrowptrnode.hpp"
 40 #include "opto/output.hpp"
 41 #include "opto/rootnode.hpp"
 42 #include "opto/runtime.hpp"
 43 
 44 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
 45   return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
 46 }
 47 
// Per-compilation barrier state: holds the barrier stubs accumulated during
// code emission, and the code offset where stub emission starts. Everything
// is allocated in the compilation arena.
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena) :
    BarrierSetC2State(comp_arena),
    _stubs(new (comp_arena) GrowableArray<ShenandoahBarrierStubC2*>(comp_arena, 8,  0, nullptr)),
    _stubs_start_offset(0) {
}
 53 
 54 #define __ kit->
 55 
// Decide whether the SATB pre-barrier for a store through 'adr' can be elided.
// This is the case when the stored-to object was freshly allocated in this
// compilation unit and no other oop could have been stored at this slot yet:
// then the previous value is provably null and there is nothing to enqueue.
// Walks the memory graph backwards (bounded number of steps) from the current
// memory state at 'adr_idx', skipping stores that provably do not alias the
// slot, until it either reaches the allocation's Initialize node (success if
// the slot is still zero there) or hits something inscrutable (failure).
static bool satb_can_remove_pre_barrier(GraphKit* kit, PhaseValues* phase, Node* adr,
                                        BasicType bt, uint adr_idx) {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == nullptr) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  // Bounded walk: give up after a fixed number of steps to keep parse time sane.
  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == nullptr) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so a
      // previous (possibly non-null) value may be visible here: bail out.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing null so that no previous store
        // has been moved up and directly write a reference
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}
140 
141 static bool shenandoah_can_remove_post_barrier(GraphKit* kit, PhaseValues* phase, Node* store_ctrl, Node* adr) {
142   intptr_t      offset = 0;
143   Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
144   AllocateNode* alloc  = AllocateNode::Ideal_allocation(base);



























































































































































145 
146   if (offset == Type::OffsetBot) {
147     return false; // Cannot unalias unless there are precise offsets.















148   }
149   if (alloc == nullptr) {
150     return false; // No allocation found.


















151   }
152 
153   Node* mem = store_ctrl;   // Start search from Store node.
154   if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
155     InitializeNode* st_init = mem->in(0)->as_Initialize();
156     AllocateNode*  st_alloc = st_init->allocation();
157     // Make sure we are looking at the same allocation
158     if (alloc == st_alloc) {
159       return true;











160     }
161   }
162 
163   return false;


















































164 }
165 
166 static uint8_t get_store_barrier(C2Access& access) {
167   if (!access.is_parse_access()) {
168     // Only support for eliding barriers at parse time for now.
169     return ShenandoahBarrierSATB | ShenandoahBarrierCardMark;

































170   }
171   GraphKit* kit = (static_cast<C2ParseAccess&>(access)).kit();
172   Node* ctl = kit->control();
173   Node* adr = access.addr().node();
174   uint adr_idx = kit->C->get_alias_index(access.addr().type());
175   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
176 
177   bool can_remove_pre_barrier = satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, access.type(), adr_idx);
178 
179   // We can skip marks on a freshly-allocated object in Eden. Keep this code in
180   // sync with CardTableBarrierSet::on_slowpath_allocation_exit. That routine
181   // informs GC to take appropriate compensating steps, upon a slow-path
182   // allocation, so as to make this card-mark elision safe.
183   // The post-barrier can also be removed if null is written. This case is
184   // handled by ShenandoahBarrierSetC2::expand_barriers, which runs at the end of C2's
185   // platform-independent optimizations to exploit stronger type information.
186   bool can_remove_post_barrier = ReduceInitialCardMarks &&
187     ((access.base() == kit->just_allocated_object(ctl)) ||
188      shenandoah_can_remove_post_barrier(kit, &kit->gvn(), ctl, adr));
189 
190   int barriers = 0;
191   if (!can_remove_pre_barrier) {
192     barriers |= ShenandoahBarrierSATB;
193   } else {
194     barriers |= ShenandoahBarrierElided;










195   }
196 
197   if (!can_remove_post_barrier) {
198     barriers |= ShenandoahBarrierCardMark;
199   } else {
200     barriers |= ShenandoahBarrierElided;

201   }
202 
203   return barriers;










































204 }
205 
// Attach store barrier data to the access before delegating to the common
// BarrierSetC2 store path. Tightly-coupled initialization stores get their
// SATB bit dropped outright; otherwise get_store_barrier() decides which
// barriers are required. AS_NO_KEEPALIVE always suppresses the SATB bit.
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
  bool needs_pre_barrier = access.is_oop() && (in_heap || anonymous);
  // Pre-barriers are unnecessary for tightly-coupled initialization stores.
  bool can_be_elided = needs_pre_barrier && tightly_coupled_alloc && ReduceInitialCardMarks;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  if (needs_pre_barrier) {
    if (can_be_elided) {
      // Keep whatever other bits are set; only swap SATB for Elided.
      access.set_barrier_data(access.barrier_data() & ~ShenandoahBarrierSATB);
      access.set_barrier_data(access.barrier_data() | ShenandoahBarrierElided);
    } else {
      access.set_barrier_data(get_store_barrier(access));
    }
  }
  if (no_keepalive) {
    // No keep-alive means no need for the pre-barrier.
    access.set_barrier_data(access.barrier_data() & ~ShenandoahBarrierSATB);
  }
  return BarrierSetC2::store_at_resolved(access, val);
}
229 
230 static void set_barrier_data(C2Access& access) {
231   assert(access.is_oop(), "Precondition");

232 
233   if (access.decorators() & C2_TIGHTLY_COUPLED_ALLOC) {
234     access.set_barrier_data(ShenandoahBarrierElided);
235     return;
236   }
237 
238   uint8_t barrier_data = 0;
239 
240   if (access.decorators() & ON_PHANTOM_OOP_REF) {
241     barrier_data |= ShenandoahBarrierPhantom;
242   } else if (access.decorators() & ON_WEAK_OOP_REF) {
243     barrier_data |= ShenandoahBarrierWeak;




244   } else {
245     barrier_data |= ShenandoahBarrierStrong;
246   }
247 
248   if (access.decorators() & IN_NATIVE) {
249     barrier_data |= ShenandoahBarrierNative;
250   }
251 
252   access.set_barrier_data(barrier_data);
253 }
254 
255 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
256   // 1: non-reference load, no additional barrier is needed
257   if (!access.is_oop()) {
258     return BarrierSetC2::load_at_resolved(access, val_type);
259   }
260 
261   // 2. Set barrier data for LRB.
262   set_barrier_data(access);

263 
264   // 3. If we are reading the value of the referent field of a Reference object, we
265   // need to record the referent in an SATB log buffer using the pre-barrier
266   // mechanism.
267   DecoratorSet decorators = access.decorators();
268   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
269   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
270   bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
271   bool needs_read_barrier = ((on_weak || on_phantom) && !no_keepalive);
272   if (needs_read_barrier) {
273     uint8_t barriers = access.barrier_data() | ShenandoahBarrierSATB;
274     access.set_barrier_data(barriers);









































275   }
276 
277   return BarrierSetC2::load_at_resolved(access, val_type);
278 }
279 
280 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
281                                                              Node* new_val, const Type* value_type) const {

282   if (access.is_oop()) {
283     set_barrier_data(access);
284     access.set_barrier_data(access.barrier_data() | ShenandoahBarrierSATB | ShenandoahBarrierCardMark);









































285   }
286   return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
287 }
288 
289 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
290                                                               Node* new_val, const Type* value_type) const {

291   if (access.is_oop()) {
292     set_barrier_data(access);
293     access.set_barrier_data(access.barrier_data() | ShenandoahBarrierSATB | ShenandoahBarrierCardMark);
















































294   }
295   return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
296 }
297 
298 Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {


299   if (access.is_oop()) {
300     set_barrier_data(access);
301     access.set_barrier_data(access.barrier_data() | ShenandoahBarrierSATB | ShenandoahBarrierCardMark);
302   }
303   return BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
304 }
305 
// Refine the barrier data of a StoreP/StoreN once C2's platform-independent
// optimizations have sharpened the type of the stored value:
// - a provably-null store needs no card mark and is not "not-null";
// - a provably-non-null store gets the NotNull hint for the backend.
void ShenandoahBarrierSetC2::refine_store(const Node* n) {
  MemNode* store = n->as_Store();
  const Node* newval = n->in(MemNode::ValueIn);
  assert(newval != nullptr, "");
  const Type* newval_bottom = newval->bottom_type();
  TypePtr::PTR newval_type = newval_bottom->make_ptr()->ptr();
  uint8_t barrier_data = store->barrier_data();
  if (!newval_bottom->isa_oopptr() &&
      !newval_bottom->isa_narrowoop() &&
      newval_type != TypePtr::Null) {
    // newval is neither an OOP nor null, so there is no barrier to refine.
    assert(barrier_data == 0, "non-OOP stores should have no barrier data");
    return;
  }
  if (barrier_data == 0) {
    // No barrier to refine.
    return;
  }
  if (newval_type == TypePtr::Null) {
    // A null store cannot create an old-to-young pointer.
    barrier_data &= ~ShenandoahBarrierNotNull;
    // Simply elide post-barrier if writing null.
    barrier_data &= ~ShenandoahBarrierCardMark;
  } else if (newval_type == TypePtr::NotNull) {
    barrier_data |= ShenandoahBarrierNotNull;
  }
  store->set_barrier_data(barrier_data);
}
333 
334 void ShenandoahBarrierSetC2::final_refinement(Compile* C) const {
335   ResourceMark rm;
336   VectorSet visited;
337   Node_List worklist;
338   worklist.push(C->root());
339   while (worklist.size() > 0) {
340     Node* n = worklist.pop();
341     if (visited.test_set(n->_idx)) {
342       continue;
343     }
344 
345     // Drop elided flag. Matcher does not care about this, and we would like to
346     // avoid invoking "barrier_data() != 0" rules when the *only* flag is Elided.
347     if (n->is_LoadStore()) {
348       LoadStoreNode* load_store = n->as_LoadStore();
349       uint8_t barrier_data = load_store->barrier_data();
350       if (barrier_data != 0) {
351         barrier_data &= ~ShenandoahBarrierElided;
352         load_store->set_barrier_data(barrier_data);
353       }
354     } else if (n->is_Mem()) {
355       MemNode* mem = n->as_Mem();
356       uint8_t barrier_data = mem->barrier_data();
357       if (barrier_data != 0) {
358         barrier_data &= ~ShenandoahBarrierElided;
359         mem->set_barrier_data(barrier_data);
360       }
361     }
362 
363     for (uint j = 0; j < n->req(); j++) {
364       Node* in = n->in(j);
365       if (in != nullptr) {
366         worklist.push(in);
367       }
368     }
369   }

370 }
371 
372 bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
373   ResourceMark rm;
374   VectorSet visited;
375   Node_List worklist;
376   worklist.push(C->root());
377   while (worklist.size() > 0) {
378     Node* n = worklist.pop();
379     if (visited.test_set(n->_idx)) {
380       continue;
381     }
382     switch(n->Opcode()) {
383       case Op_StoreP:
384       case Op_StoreN: {
385         refine_store(n);
386         break;
387       }
388     }
389 
390     for (uint j = 0; j < n->req(); j++) {
391       Node* in = n->in(j);
392       if (in != nullptr) {
393         worklist.push(in);
394       }
395     }
396   }
397   return false;
398 }
399 
400 bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
401   bool is_oop = is_reference_type(type);
402   if (!is_oop) {
403     return false;
404   }
405   if (ShenandoahSATBBarrier && tightly_coupled_alloc) {
406     if (phase == Optimization) {
407       return false;
408     }
409     return !is_clone;
410   }
411   return true;
412 }
413 
// Decide whether cloning an object of 'src_type' needs the Shenandoah clone
// barrier. Sets 'is_oop_array' when the source is an oop-bearing array.
// Instances provably free of oop fields (recording a leaf-type dependency
// when that relies on the current class hierarchy) and primitive arrays need
// no barrier; everything else conservatively does.
bool ShenandoahBarrierSetC2::clone_needs_barrier(const TypeOopPtr* src_type, bool& is_oop_array) {
  if (!ShenandoahCloneBarrier) {
    return false;
  }

  if (src_type->isa_instptr() != nullptr) {
    // Instance: need barrier only if there is a possibility of having an oop anywhere in it.
    ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
    if ((src_type->klass_is_exact() || !ik->has_subklass()) &&
        !ik->has_injected_fields() && !ik->has_object_fields()) {
      if (!src_type->klass_is_exact()) {
        // Class is *currently* the leaf in the hierarchy.
        // Record the dependency so that we deopt if this does not hold in future.
        Compile::current()->dependencies()->assert_leaf_type(ik);
      }
      return false;
    }
  } else if (src_type->isa_aryptr() != nullptr) {
    // Array: need barrier only if array is oop-bearing.
    BasicType src_elem = src_type->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) {
      is_oop_array = true;
    } else {
      return false;
    }
  }

  // Assume the worst.
  return true;
}
444 
// Build the IR for Object.clone(). Barrier-free and instance-like cases are
// delegated to the common BarrierSetC2 path; an oop-array clone is turned
// into an ArrayCopyNode that will call the normal (barrier-aware) oop
// arraycopy stub after expansion.
void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
  const TypeOopPtr* src_type = kit->gvn().type(src_base)->is_oopptr();

  bool is_oop_array = false;
  if (!clone_needs_barrier(src_type, is_oop_array)) {
    // No barrier is needed? Just do what common BarrierSetC2 wants with it.
    BarrierSetC2::clone(kit, src_base, dst_base, size, is_array);
    return;
  }

  if (ShenandoahCloneRuntime || !is_array || !is_oop_array) {
    // Looks like an instance? Prepare the instance clone. This would either
    // be exploded into individual accesses or be left as runtime call.
    // Common BarrierSetC2 prepares everything for both cases.
    BarrierSetC2::clone(kit, src_base, dst_base, size, is_array);
    return;
  }

  // We are cloning the oop array. Prepare to call the normal arraycopy stub
  // after the expansion. Normal stub takes the number of actual type-sized
  // elements to copy after the base, compute the count here.
  Node* offset = kit->MakeConX(arrayOopDesc::base_offset_in_bytes(UseCompressedOops ? T_NARROWOOP : T_OBJECT));
  size = kit->gvn().transform(new SubXNode(size, offset));
  size = kit->gvn().transform(new URShiftXNode(size, kit->intcon(LogBytesPerHeapOop)));
  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, size, true, false);
  ac->set_clone_array();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    // The node survived GVN as-is: wire it up as a runtime call.
    ac->set_adr_type(TypeRawPtr::BOTTOM);
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), TypeRawPtr::BOTTOM);
  } else {
    // GVN replaced the copy; just install the resulting memory state.
    kit->set_all_memory(n);
  }
}
479 
// Macro-expand a clone ArrayCopyNode. Barrier-free clones expand to the plain
// HeapWord copy; instance-like or runtime-forced clones go to
// ShenandoahRuntime::clone; oop-array clones are lowered to a leaf call into
// the regular oop arraycopy stub, which applies GC barriers in assembly.
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* const ctrl        = ac->in(TypeFunc::Control);
  Node* const mem         = ac->in(TypeFunc::Memory);
  Node* const src         = ac->in(ArrayCopyNode::Src);
  Node* const src_offset  = ac->in(ArrayCopyNode::SrcPos);
  Node* const dest        = ac->in(ArrayCopyNode::Dest);
  Node* const dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length            = ac->in(ArrayCopyNode::Length);

  const TypeOopPtr* src_type = phase->igvn().type(src)->is_oopptr();

  bool is_oop_array = false;
  if (!clone_needs_barrier(src_type, is_oop_array)) {
    // No barrier is needed? Expand to normal HeapWord-sized arraycopy.
    BarrierSetC2::clone_at_expansion(phase, ac);
    return;
  }

  if (ShenandoahCloneRuntime || !ac->is_clone_array() || !is_oop_array) {
    // Still looks like an instance? Likely a large instance or reflective
    // clone with unknown length. Go to runtime and handle it there.
    clone_in_runtime(phase, ac, CAST_FROM_FN_PTR(address, ShenandoahRuntime::clone_addr()), "ShenandoahRuntime::clone");
    return;
  }

  // We are cloning the oop array. Call into normal oop array copy stubs.
  // Those stubs would call BarrierSetAssembler to handle GC barriers.

  // This is the full clone, so offsets should equal each other and be at array base.
  assert(src_offset == dest_offset, "should be equal");
  const jlong offset = src_offset->get_long();
  const TypeAryPtr* const ary_ptr = src->get_ptr_type()->isa_aryptr();
  BasicType bt = ary_ptr->elem()->array_element_basic_type();
  assert(offset == arrayOopDesc::base_offset_in_bytes(bt), "should match");

  const char*   copyfunc_name = "arraycopy";
  const address copyfunc_addr = phase->basictype2arraycopy(T_OBJECT, nullptr, nullptr, true, copyfunc_name, true);

  Node* const call = phase->make_leaf_call(ctrl, mem,
      OptoRuntime::fast_arraycopy_Type(),
      copyfunc_addr, copyfunc_name,
      TypeRawPtr::BOTTOM,
      phase->basic_plus_adr(src, src_offset),
      phase->basic_plus_adr(dest, dest_offset),
      length,
      phase->top()
  );
  phase->transform_later(call);

  phase->igvn().replace_node(ac, call);
}
531 
532 // Support for macro expanded GC barriers
533 void ShenandoahBarrierSetC2::eliminate_gc_barrier_data(Node* node) const {
534   if (node->is_LoadStore()) {
535     LoadStoreNode* loadstore = node->as_LoadStore();
536     loadstore->set_barrier_data(0);
537   } else if (node->is_Mem()) {
538     MemNode* mem = node->as_Mem();
539     mem->set_barrier_data(0);
540   }
541 }
542 
// Called during macro expansion when an access is proven to need no barrier
// (e.g. its allocation was scalar-replaced): just drop the barrier bits.
void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  eliminate_gc_barrier_data(node);
}
546 
// Allocate the per-compilation barrier state in the compilation arena.
void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}
550 
// Retrieve the Shenandoah-specific barrier state of the current compilation.
ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}
554 
555 void ShenandoahBarrierSetC2::print_barrier_data(outputStream* os, uint8_t data) {
556   os->print(" Node barriers: ");
557   if ((data & ShenandoahBarrierStrong) != 0) {
558     data &= ~ShenandoahBarrierStrong;
559     os->print("strong ");
560   }
561 
562   if ((data & ShenandoahBarrierWeak) != 0) {
563     data &= ~ShenandoahBarrierWeak;
564     os->print("weak ");
565   }
566 
567   if ((data & ShenandoahBarrierPhantom) != 0) {
568     data &= ~ShenandoahBarrierPhantom;
569     os->print("phantom ");
570   }
571 
572   if ((data & ShenandoahBarrierNative) != 0) {
573     data &= ~ShenandoahBarrierNative;
574     os->print("native ");
575   }
576 
577   if ((data & ShenandoahBarrierElided) != 0) {
578     data &= ~ShenandoahBarrierElided;
579     os->print("elided ");
580   }
581 
582   if ((data & ShenandoahBarrierSATB) != 0) {
583     data &= ~ShenandoahBarrierSATB;
584     os->print("satb ");
585   }
586 
587   if ((data & ShenandoahBarrierCardMark) != 0) {
588     data &= ~ShenandoahBarrierCardMark;
589     os->print("cardmark ");
590   }
591 
592   if ((data & ShenandoahBarrierNotNull) != 0) {
593     data &= ~ShenandoahBarrierNotNull;
594     os->print("not-null ");
595   }
596   os->cr();
597 
598   if (data > 0) {
599     fatal("Unknown bit!");
600   }
601 
602   os->print_cr(" GC configuration: %sLRB %sSATB %sCAS %sClone %sCard",
603     (ShenandoahLoadRefBarrier ? "+" : "-"),
604     (ShenandoahSATBBarrier    ? "+" : "-"),
605     (ShenandoahCASBarrier     ? "+" : "-"),
606     (ShenandoahCloneBarrier   ? "+" : "-"),
607     (ShenandoahCardBarrier    ? "+" : "-")
608   );
609 }
610 
611 #ifdef ASSERT
612 void ShenandoahBarrierSetC2::verify_gc_barrier_assert(bool cond, const char* msg, uint8_t bd, Node* n) {
613   if (!cond) {
614     stringStream ss;
615     ss.print_cr("%s", msg);
616     ss.print_cr("-----------------");
617     print_barrier_data(&ss, bd);
618     ss.print_cr("-----------------");
619     n->dump_bfs(1, nullptr, "", &ss);
620     report_vm_error(__FILE__, __LINE__, ss.as_string());
621   }
622 }
623 
// Debug-only whole-graph check (gated by ShenandoahVerifyOptoBarriers) that
// every oop load, oop store and oop load-store carries sensible barrier data:
// oop accesses must have bits set, weak/phantom bits may only appear on
// Reference.referent loads, and non-access memory nodes must carry none.
// NOTE(review): 'phis' and 'visited' below appear unused in this walk.
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (!ShenandoahVerifyOptoBarriers) {
    return;
  }

  // Final refinement might have removed the remaining ShenandoahBarrierElided flag,
  // making some accesses completely blank. TODO: If we get rid of ShenandoahBarrierElided
  // machinery completely, we can drop this filter too.
  bool accept_blank = (phase == BeforeCodeGen);

  Unique_Node_List wq;
  Node_Stack phis(0);
  VectorSet visited;

  wq.push(compile->root());
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    int opc = n->Opcode();

    if (opc == Op_LoadP || opc == Op_LoadN) {
      uint8_t bd = n->as_Load()->barrier_data();

      const TypePtr* adr_type = n->as_Load()->adr_type();
      if (adr_type->isa_oopptr() || adr_type->isa_narrowoop()) {
        verify_gc_barrier_assert(accept_blank || bd != 0, "Oop load should have barrier data", bd, n);

        // Weak/phantom bits are only legitimate on Reference.referent loads.
        bool is_weak = ((bd & (ShenandoahBarrierWeak | ShenandoahBarrierPhantom)) != 0);
        bool is_referent = adr_type->isa_instptr() &&
            adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
            adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset();

        verify_gc_barrier_assert(!is_weak || is_referent, "Weak load only for Reference.referent", bd, n);
      } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
        // Some LoadP-s are used for T_ADDRESS loads from raw pointers. These are not oops.
        // Some LoadP-s are used to load class data.
        // TODO: Verify their barrier data.
      } else {
        verify_gc_barrier_assert(false, "Unclassified access type", bd, n);
      }
    } else if (opc == Op_StoreP || opc == Op_StoreN) {
      uint8_t bd = n->as_Store()->barrier_data();
      const TypePtr* adr_type = n->as_Store()->adr_type();
      if (adr_type->isa_oopptr() || adr_type->isa_narrowoop()) {
        // Reference.clear stores null
        bool is_referent = adr_type->isa_instptr() &&
             adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
             adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset();

        const TypePtr* val_type = n->as_Store()->in(MemNode::Memory)->adr_type();
        if (!is_referent && (val_type->isa_oopptr() || val_type->isa_narrowoop())) {
          verify_gc_barrier_assert(accept_blank || bd != 0, "Oop store should have barrier data", bd, n);
        }
      } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
        // Similar to LoadP-s, some of these accesses are raw, and some are handling oops.
        // TODO: Verify their barrier data.
      } else {
        verify_gc_barrier_assert(false, "Unclassified access type", bd, n);
      }
    } else if (opc == Op_WeakCompareAndSwapP || opc == Op_WeakCompareAndSwapN ||
               opc == Op_CompareAndExchangeP || opc == Op_CompareAndExchangeN ||
               opc == Op_CompareAndSwapP     || opc == Op_CompareAndSwapN ||
               opc == Op_GetAndSetP          || opc == Op_GetAndSetN) {
      uint8_t bd = n->as_LoadStore()->barrier_data();
      verify_gc_barrier_assert(accept_blank || bd != 0, "Oop load-store should have barrier data", bd, n);
    } else if (n->is_Mem()) {
      uint8_t bd = MemNode::barrier_data(n); // FIXME: LOL HotSpot, why not n->as_Mem()? LoadStore is both is_Mem() and not as_Mem().
      verify_gc_barrier_assert(bd == 0, "Other mem nodes should have no barrier data", bd, n);
    }

    // Forward walk: visit every use of this node.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      wq.push(m);
    }
  }
}
699 #endif
700 
// File-local accessor for the current compilation's Shenandoah barrier state.
static ShenandoahBarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}
704 
705 int ShenandoahBarrierSetC2::estimate_stub_size() const {
706   Compile* const C = Compile::current();
707   BufferBlob* const blob = C->output()->scratch_buffer_blob();
708   GrowableArray<ShenandoahBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
709   int size = 0;
710 
711   for (int i = 0; i < stubs->length(); i++) {
712     CodeBuffer cb(blob->content_begin(), checked_cast<CodeBuffer::csize_t>((address)C->output()->scratch_locs_memory() - blob->content_begin()));
713     MacroAssembler masm(&cb);
714     stubs->at(i)->emit_code(masm);
715     size += cb.insts_size();
716   }















717 
718   return size;
719 }
720 
// Emit all registered barrier stubs into the code buffer, recording where the
// stub section starts. Bails out (recording a compilation failure) if the
// code buffer cannot be grown to fit the next stub.
void ShenandoahBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);
  GrowableArray<ShenandoahBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  barrier_set_state()->set_stubs_start_offset(masm.offset());

  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      // Expansion happened but produced no backing blob: code cache exhausted.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    stubs->at(i)->emit_code(masm);
  }

  masm.flush();

}
739 
740 void ShenandoahBarrierStubC2::register_stub() {
741   if (!Compile::current()->output()->in_scratch_emit_size()) {
742     barrier_set_state()->stubs()->append(this);



743   }

744 }
745 
746 ShenandoahStoreBarrierStubC2* ShenandoahStoreBarrierStubC2::create(const MachNode* node, Address dst, bool dst_narrow, Register src, bool src_narrow, Register tmp) {
747   auto* stub = new (Compile::current()->comp_arena()) ShenandoahStoreBarrierStubC2(node, dst, dst_narrow, src, src_narrow, tmp);
748   stub->register_stub();
749   return stub;


























750 }
751 
752 ShenandoahLoadRefBarrierStubC2* ShenandoahLoadRefBarrierStubC2::create(const MachNode* node, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
753   auto* stub = new (Compile::current()->comp_arena()) ShenandoahLoadRefBarrierStubC2(node, obj, addr, tmp1, tmp2, tmp3, narrow);
754   stub->register_stub();
755   return stub;







































756 }
757 
758 ShenandoahSATBBarrierStubC2* ShenandoahSATBBarrierStubC2::create(const MachNode* node, Register addr, Register preval, Register tmp, bool encoded_preval) {
759   auto* stub = new (Compile::current()->comp_arena()) ShenandoahSATBBarrierStubC2(node, addr, preval, tmp, encoded_preval);
760   stub->register_stub();
761   return stub;

















762 }
763 
764 ShenandoahCASBarrierSlowStubC2* ShenandoahCASBarrierSlowStubC2::create(const MachNode* node, Register addr, Register expected, Register new_val, Register result, Register tmp1, Register tmp2, bool cae, bool acquire, bool release, bool weak) {
765   auto* stub = new (Compile::current()->comp_arena()) ShenandoahCASBarrierSlowStubC2(node, addr, Address(), expected, new_val, result, tmp1, tmp2, cae, acquire, release, weak);
766   stub->register_stub();
767   return stub;
768 }
769 
770 ShenandoahCASBarrierSlowStubC2* ShenandoahCASBarrierSlowStubC2::create(const MachNode* node, Address addr, Register expected, Register new_val, Register result, Register tmp1, Register tmp2, bool cae) {
771   auto* stub = new (Compile::current()->comp_arena()) ShenandoahCASBarrierSlowStubC2(node, noreg, addr, expected, new_val, result, tmp1, tmp2, cae, false, false, false);
772   stub->register_stub();
773   return stub;
774 }
775 
776 bool ShenandoahBarrierSetC2State::needs_liveness_data(const MachNode* mach) const {
777   return ShenandoahSATBBarrierStubC2::needs_barrier(mach) ||
778          ShenandoahLoadRefBarrierStubC2::needs_barrier(mach);
















779 }
780 
// Always request live-in register data for barrier-bearing nodes.
// NOTE(review): rationale not visible here — presumably the stubs need the
// registers live on entry to the access; confirm against the stub emitters.
bool ShenandoahBarrierSetC2State::needs_livein_data() const {
  return true;
}
--- EOF ---