/*
 * Copyright (c) 2018, 2026, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/output.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

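// Returns the Shenandoah C2 support object attached to the global barrier set.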
ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena) :
  BarrierSetC2State(comp_arena),
  _stubs(new (comp_arena) GrowableArray<ShenandoahBarrierStubC2*>(comp_arena, 8, 0, nullptr)),
  _trampoline_stubs_count(0),
  _stubs_start_offset(0) {
}

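// Computes the barrier bits for an oop access from its decorators and the
// globally enabled Shenandoah barriers. Accesses within tightly coupled
// allocations get only the "elided" marker bit. Otherwise, we combine the
// LRB strength bit for loads, the keep-alive (SATB) and card-mark bits for
// stores, and the "native" bit for off-heap accesses.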
static void set_barrier_data(C2Access& access, bool load, bool store) {
  if (!access.is_oop()) {
    return;
  }

  DecoratorSet decorators = access.decorators();
  bool tightly_coupled = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;

  if (tightly_coupled) {
    access.set_barrier_data(ShenandoahBitElided);
    return;
  }

  uint8_t barrier_data = 0;

  if (load) {
    if (ShenandoahLoadRefBarrier) {
      if (on_phantom) {
        barrier_data |= ShenandoahBitPhantom;
      } else if (on_weak) {
        barrier_data |= ShenandoahBitWeak;
      } else {
        barrier_data |= ShenandoahBitStrong;
      }
    }
  }

  if (store) {
    if (ShenandoahSATBBarrier) {
      barrier_data |= ShenandoahBitKeepAlive;
    }
    if (ShenandoahCardBarrier && in_heap) {
      barrier_data |= ShenandoahBitCardMark;
    }
  }

  if (!in_heap) {
    barrier_data |= ShenandoahBitNative;
  }

  access.set_barrier_data(barrier_data);
}

Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  // 1. Non-reference load: no additional barrier is needed.
  if (!access.is_oop()) {
    return BarrierSetC2::load_at_resolved(access, val_type);
  }

  // 2. Set barrier data for the load.
  set_barrier_data(access, /* load = */ true, /* store = */ false);

  // 3. Correction: if we are reading the value of the referent field of a
  // Reference object, we need a keep-alive (SATB) barrier to record the
  // resurrection of the referent.
  DecoratorSet decorators = access.decorators();
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  bool needs_keepalive = ((on_weak || on_phantom) && !no_keepalive);
  if (needs_keepalive) {
    uint8_t barriers = access.barrier_data() | (ShenandoahSATBBarrier ? ShenandoahBitKeepAlive : 0);
    access.set_barrier_data(barriers);
  }

  return BarrierSetC2::load_at_resolved(access, val_type);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  // 1. Non-reference store: no additional barrier is needed.
  if (!access.is_oop()) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  // 2. Set barrier data for the store.
  set_barrier_data(access, /* load = */ false, /* store = */ true);

  // 3. Correction: drop the keep-alive barrier from stores that are marked
  // to do no keep-alive.
  DecoratorSet decorators = access.decorators();
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  if (no_keepalive) {
    access.set_barrier_data(access.barrier_data() & ~ShenandoahBitKeepAlive);
  }

  return BarrierSetC2::store_at_resolved(access, val);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                             Node* new_val, const Type* value_type) const {
  set_barrier_data(access, /* load = */ true, /* store = */ true);
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  set_barrier_data(access, /* load = */ true, /* store = */ true);
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
  set_barrier_data(access, /* load = */ true, /* store = */ true);
  return BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
}

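// Opcode classification helpers: these recognize the memory access nodes
// that can carry Shenandoah barrier data.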
bool ShenandoahBarrierSetC2::is_Load(int opcode) {
  switch (opcode) {
    case Op_LoadN:
    case Op_LoadP:
      return true;
    default:
      return false;
  }
}

bool ShenandoahBarrierSetC2::is_Store(int opcode) {
  switch (opcode) {
    case Op_StoreN:
    case Op_StoreP:
      return true;
    default:
      return false;
  }
}

bool ShenandoahBarrierSetC2::is_LoadStore(int opcode) {
  switch (opcode) {
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
      return true;
    default:
      return false;
  }
}

bool ShenandoahBarrierSetC2::can_remove_load_barrier(Node* root) {
  // Check if all outs feed into nodes that do not expose the oops to the rest
  // of the runtime system. In this case, we can elide the LRB. We bail out
  // with false at the first sight of trouble.

  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;
  worklist.push(root);

  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* out = n->fast_out(i);
      switch (out->Opcode()) {
        case Op_EncodeP:
        case Op_DecodeN:
        case Op_CastPP:
        case Op_CheckCastPP:
        case Op_AddP: {
          // Transitive node, check if any other outs are doing anything troublesome.
          worklist.push(out);
          break;
        }

        case Op_LoadRange:
        case Op_LoadKlass: {
          // Loads of stable metadata values from the object. These are the same in all copies.
          // Note that LoadNKlass is *not* safe: with +UCOH, it loads from the mark word, which
          // clashes with forwarding pointers.
          break;
        }

        case Op_CmpN: {
          if (out->in(1) == n &&
              out->in(2)->Opcode() == Op_ConN &&
              out->in(2)->get_narrowcon() == 0) {
            // Null check, no oop is exposed.
            break;
          }
          if (out->in(2) == n &&
              out->in(1)->Opcode() == Op_ConN &&
              out->in(1)->get_narrowcon() == 0) {
            // Null check, no oop is exposed.
            break;
          }
          return false;
        }

        case Op_CmpP: {
          if (out->in(1) == n &&
              out->in(2)->Opcode() == Op_ConP &&
              out->in(2)->get_ptr() == 0) {
            // Null check, no oop is exposed.
            break;
          }
          if (out->in(2) == n &&
              out->in(1)->Opcode() == Op_ConP &&
              out->in(1)->get_ptr() == 0) {
            // Null check, no oop is exposed.
            break;
          }
          return false;
        }

        case Op_CallStaticJava: {
          if (out->as_CallStaticJava()->is_uncommon_trap()) {
            // Local feeds into an uncommon trap. Deopt machinery handles barriers itself.
            break;
          }
          return false;
        }

        default: {
          // Paranoidly distrust any other nodes.
          return false;
        }
      }
    }
  }

  // Nothing troublesome found.
  return true;
}

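// Refines the barrier data for a load: if the loaded oop demonstrably does
// not escape to the rest of the runtime, the strong LRB can be elided.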
uint8_t ShenandoahBarrierSetC2::refine_load(Node* n, uint8_t bd) {
  assert(ShenandoahElideBarriers, "Checked by caller");
  assert(bd != 0, "Checked by caller");

  // Do not touch weak loads at all: they are responsible for shielding from
  // Reference.referent resurrection.
  if ((bd & (ShenandoahBitWeak | ShenandoahBitPhantom)) != 0) {
    return bd;
  }

  if (can_remove_load_barrier(n)) {
    bd &= ~ShenandoahBitStrong;
    bd |= ShenandoahBitElided;
  }

  return bd;
}

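// Refines the barrier data for a store using the static type of the new
// value: it clears the not-null and card-mark bits when null is stored, and
// sets the not-null bit when the value is provably non-null.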
uint8_t ShenandoahBarrierSetC2::refine_store(Node* n, uint8_t bd) {
  assert(ShenandoahElideBarriers, "Checked by caller");
  assert(bd != 0, "Checked by caller");
  assert(n->is_Mem() || n->is_LoadStore(), "Sanity");

  // Not an oop store? There should be no barriers.
  const Node* newval = n->in(MemNode::ValueIn);
  assert(newval != nullptr, "Should be present");
  const Type* newval_bottom = newval->bottom_type();
  if (!newval_bottom->isa_oopptr() &&
      !newval_bottom->isa_narrowoop() &&
      newval_bottom != TypePtr::NULL_PTR) {
    assert(bd == 0, "Non-oop stores should have no barrier data");
    return bd;
  }

  // Does the type system tell us something about nullity?
  const TypePtr* newval_type = newval_bottom->make_ptr();
  assert(newval_type != nullptr, "Should have been filtered before");
  TypePtr::PTR newval_type_ptr = newval_type->ptr();
  if (newval_type_ptr == TypePtr::Null) {
    bd &= ~ShenandoahBitNotNull;
    // The card-table barrier is not needed if we store null.
    bd &= ~ShenandoahBitCardMark;
  } else if (newval_type_ptr == TypePtr::NotNull) {
    // Definitely not null.
    bd |= ShenandoahBitNotNull;
  }

  return bd;
}

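// Shenandoah does not macro-expand barriers here. Instead, this pass walks
// the whole graph and refines the barrier data on all memory accesses, then
// returns false (nothing was expanded).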
bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  if (!ShenandoahElideBarriers) {
    return false;
  }

  ResourceMark rm;
  VectorSet visited;
  Node_List worklist;
  worklist.push(C->root());
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }

    int opc = n->Opcode();
    bool is_load = is_Load(opc);
    bool is_store = is_Store(opc);
    bool is_load_store = is_LoadStore(opc);

    uint8_t bd = 0;
    if (is_load_store) {
      bd = n->as_LoadStore()->barrier_data();
    } else if (is_load || is_store) {
      bd = n->as_Mem()->barrier_data();
    }

    if (bd != 0) {
      if (is_load || is_load_store) {
        bd = refine_load(n, bd);
      }
      if (is_store || is_load_store) {
        bd = refine_store(n, bd);
      }
      if (is_load_store) {
        n->as_LoadStore()->set_barrier_data(bd);
      } else {
        n->as_Mem()->set_barrier_data(bd);
      }
    }

    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        worklist.push(in);
      }
    }
  }
  return false;
}

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::eliminate_gc_barrier_data(Node* node) const {
  if (node->is_LoadStore()) {
    LoadStoreNode* loadstore = node->as_LoadStore();
    loadstore->set_barrier_data(0);
  } else if (node->is_Mem()) {
    MemNode* mem = node->as_Mem();
    mem->set_barrier_data(0);
  }
}

// If there are no real barrier flags on the node, strip away the additional fluff.
// The matcher does not care about it, and we would like to avoid invoking the
// "barrier_data() != 0" rules when the only flags set are the irrelevant fluff.
void ShenandoahBarrierSetC2::strip_extra_data(Node* n) const {
  if (n->is_LoadStore()) {
    LoadStoreNode* load_store = n->as_LoadStore();
    uint8_t barrier_data = load_store->barrier_data();
    if ((barrier_data & ShenandoahBitsReal) == 0) {
      load_store->set_barrier_data(0);
    }
  } else if (n->is_Mem()) {
    MemNode* mem = n->as_Mem();
    uint8_t barrier_data = mem->barrier_data();
    if ((barrier_data & ShenandoahBitsReal) == 0) {
      mem->set_barrier_data(0);
    }
  }
}

void ShenandoahBarrierSetC2::strip_extra_data(Node_List& accesses) const {
  for (uint c = 0; c < accesses.size(); c++) {
    strip_extra_data(accesses.at(c));
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  eliminate_gc_barrier_data(node);
}

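// Elides barrier bits on an access that is dominated either by an allocation
// (dominator == nullptr) or by another access to the same oop that already
// performs the necessary barrier work.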
void ShenandoahBarrierSetC2::elide_dominated_barrier(MachNode* node, MachNode* dominator) const {
  uint8_t orig_bd = node->barrier_data();
  if (orig_bd == 0) {
    // Nothing to do.
    return;
  }

  uint8_t bd = orig_bd;
  int node_opcode = node->ideal_Opcode();

  if (dominator == nullptr) {
    // No dominating access: the dominator must be an allocation node.
    if (is_Load(node_opcode) || is_LoadStore(node_opcode)) {
      // Loads from recent allocations do not need LRBs.
      bd &= ~ShenandoahBitStrong;
    }
    if (is_Store(node_opcode) || is_LoadStore(node_opcode)) {
      // Stores to recent allocations do not need keep-alive (KA) or card-mark (CM) barriers.
      bd &= ~ShenandoahBitKeepAlive;
      bd &= ~ShenandoahBitCardMark;
    }
  } else {
    assert(is_Load(node_opcode) || is_Store(node_opcode) || is_LoadStore(node_opcode), "Sanity");
    int dom_opcode = dominator->ideal_Opcode();
    uint8_t dom_bd = dominator->barrier_data();

    if (is_Load(dom_opcode) || is_LoadStore(dom_opcode)) {
      // If the dominating load is set up to perform LRB fixups, no further LRB is needed.
      if ((dom_bd & ShenandoahBitStrong) != 0) {
        bd &= ~ShenandoahBitStrong;
      }
    }
    if (is_Store(dom_opcode)) {
      // The dominating store has stored the good ref; no LRB is needed.
      bd &= ~ShenandoahBitStrong;
    }
  }

  if (orig_bd != bd) {
    node->set_barrier_data(bd);
  }
}

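// Late (post-matching) analysis: collect all accesses with real barrier bits
// along with the allocations and accesses that dominate them, then elide the
// barriers made redundant by their dominators.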
void ShenandoahBarrierSetC2::analyze_dominating_barriers() const {
  if (!ShenandoahElideDominatedBarriers) {
    return;
  }

  ResourceMark rm;
  Node_List accesses, dominators;

  PhaseCFG* const cfg = Compile::current()->cfg();
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    const Block* const block = cfg->get_block(i);
    for (uint j = 0; j < block->number_of_nodes(); ++j) {
      Node* const node = block->get_node(j);

      // Nothing that happens within a fresh allocation needs barriers.
      // Record allocations for the dominance analysis.
      if (node->is_Phi() && is_allocation(node)) {
        dominators.push(node);
        continue;
      }

      if (!node->is_Mach()) {
        continue;
      }

      MachNode* const mach = node->as_Mach();
      int opcode = mach->ideal_Opcode();
      if (is_Load(opcode) || is_Store(opcode) || is_LoadStore(opcode)) {
        if ((mach->barrier_data() & ShenandoahBitsReal) != 0) {
          accesses.push(mach);
          dominators.push(mach);
        }
      }
    }
  }

  elide_dominated_barriers(accesses, dominators);

  // Also clean up extra metadata. Dominance analysis likely left
  // many non-elided barriers with extra metadata, which can be stripped away.
  strip_extra_data(accesses);
}

uint ShenandoahBarrierSetC2::estimated_barrier_size(const Node* node) const {
  // Barrier impact on the fast path is driven by GC state checks emitted very late.
  // These checks are tight load-test-branch sequences with no impact on C2 graph
  // size. Limiting unrolling in the presence of GC barriers might make some loops
  // tighter than with default unrolling, which could benefit performance through
  // denser code, but testing shows it is counter-productive.
  // Therefore, we report zero barrier size and let C2 do its normal thing.
  return 0;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = is_reference_type(type);
  if (!is_oop) {
    return false;
  }
  if (ShenandoahSATBBarrier && tightly_coupled_alloc) {
    if (phase == Optimization) {
      return false;
    }
    return !is_clone;
  }
  return true;
}

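// Decides whether a clone of the given source type needs the Shenandoah
// clone barrier. Sets is_oop_array to true when the source is known to be
// an array of oops, which enables the faster oop arraycopy path.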
bool ShenandoahBarrierSetC2::clone_needs_barrier(const TypeOopPtr* src_type, bool& is_oop_array) {
  if (!ShenandoahCloneBarrier) {
    return false;
  }

  if (src_type->isa_instptr() != nullptr) {
    // Instance: need the barrier only if there is a possibility of having an oop anywhere in it.
    ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
    if ((src_type->klass_is_exact() || !ik->has_subklass()) &&
        !ik->has_injected_fields() && !ik->has_object_fields()) {
      if (!src_type->klass_is_exact()) {
        // The class is *currently* a leaf in the hierarchy. Record the dependency
        // so that we deopt if this does not hold in the future.
        Compile::current()->dependencies()->assert_leaf_type(ik);
      }
      return false;
    }
  } else if (src_type->isa_aryptr() != nullptr) {
    // Array: need the barrier only if the array is oop-bearing.
    BasicType src_elem = src_type->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) {
      is_oop_array = true;
    } else {
      return false;
    }
  }

  // Assume the worst.
  return true;
}

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
  const TypeOopPtr* src_type = kit->gvn().type(src_base)->is_oopptr();

  bool is_oop_array = false;
  if (!clone_needs_barrier(src_type, is_oop_array)) {
    // No barrier is needed? Just do what the common BarrierSetC2 wants with it.
    BarrierSetC2::clone(kit, src_base, dst_base, size, is_array);
    return;
  }

  if (ShenandoahCloneRuntime || !is_array || !is_oop_array) {
    // Looks like an instance? Prepare the instance clone. It would either be
    // exploded into individual accesses or be left as a runtime call. The
    // common BarrierSetC2 code prepares everything for both cases.
    BarrierSetC2::clone(kit, src_base, dst_base, size, is_array);
    return;
  }

  // We are cloning an oop array. Prepare to call the normal arraycopy stub
  // after the expansion. The stub takes the number of actual type-sized
  // elements to copy after the base, so compute that count here.
  Node* offset = kit->MakeConX(arrayOopDesc::base_offset_in_bytes(UseCompressedOops ? T_NARROWOOP : T_OBJECT));
  size = kit->gvn().transform(new SubXNode(size, offset));
  size = kit->gvn().transform(new URShiftXNode(size, kit->intcon(LogBytesPerHeapOop)));
  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, size, true, false);
  ac->set_clone_array();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    ac->set_adr_type(TypeRawPtr::BOTTOM);
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), TypeRawPtr::BOTTOM);
  } else {
    kit->set_all_memory(n);
  }
}

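// Macro-expansion-time counterpart of clone(): picks between the plain
// HeapWord-sized copy, the runtime clone call, and the oop arraycopy stub.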
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* const ctrl = ac->in(TypeFunc::Control);
  Node* const mem = ac->in(TypeFunc::Memory);
  Node* const src = ac->in(ArrayCopyNode::Src);
  Node* const src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* const dest = ac->in(ArrayCopyNode::Dest);
  Node* const dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  const TypeOopPtr* src_type = phase->igvn().type(src)->is_oopptr();

  bool is_oop_array = false;
  if (!clone_needs_barrier(src_type, is_oop_array)) {
    // No barrier is needed? Expand to the normal HeapWord-sized arraycopy.
    BarrierSetC2::clone_at_expansion(phase, ac);
    return;
  }

  if (ShenandoahCloneRuntime || !ac->is_clone_array() || !is_oop_array) {
    // Still looks like an instance? Likely a large instance or a reflective
    // clone with unknown length. Go to runtime and handle it there.
    clone_in_runtime(phase, ac, CAST_FROM_FN_PTR(address, ShenandoahRuntime::clone_addr()), "ShenandoahRuntime::clone");
    return;
  }

  // We are cloning an oop array. Call into the normal oop arraycopy stubs.
  // Those stubs call BarrierSetAssembler to handle GC barriers.

  // This is the full clone, so the source and destination offsets must be
  // equal and point at the array base.
  assert(src_offset == dest_offset, "should be equal");
  const jlong offset = src_offset->get_long();
  const TypeAryPtr* const ary_ptr = src->get_ptr_type()->isa_aryptr();
  BasicType bt = ary_ptr->elem()->array_element_basic_type();
  assert(offset == arrayOopDesc::base_offset_in_bytes(bt), "should match");

  const char* copyfunc_name = "arraycopy";
  const address copyfunc_addr = phase->basictype2arraycopy(T_OBJECT, nullptr, nullptr, true, copyfunc_name, true);

  Node* const call = phase->make_leaf_call(ctrl, mem,
                                           OptoRuntime::fast_arraycopy_Type(),
                                           copyfunc_addr, copyfunc_name,
                                           TypeRawPtr::BOTTOM,
                                           phase->basic_plus_adr(src, src_offset),
                                           phase->basic_plus_adr(dest, dest_offset),
                                           length,
                                           phase->top());
  phase->transform_later(call);

  phase->igvn().replace_node(ac, call);
}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

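// Debug printing for barrier data: prints each known bit by name, consuming
// it from the mask, so that any leftover unknown bit is reported as fatal.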
void ShenandoahBarrierSetC2::print_barrier_data(outputStream* os, uint8_t data) {
  os->print(" Node barriers: ");
  if ((data & ShenandoahBitStrong) != 0) {
    data &= ~ShenandoahBitStrong;
    os->print("strong ");
  }

  if ((data & ShenandoahBitWeak) != 0) {
    data &= ~ShenandoahBitWeak;
    os->print("weak ");
  }

  if ((data & ShenandoahBitPhantom) != 0) {
    data &= ~ShenandoahBitPhantom;
    os->print("phantom ");
  }

  if ((data & ShenandoahBitKeepAlive) != 0) {
    data &= ~ShenandoahBitKeepAlive;
    os->print("keepalive ");
  }

  if ((data & ShenandoahBitCardMark) != 0) {
    data &= ~ShenandoahBitCardMark;
    os->print("cardmark ");
  }

  if ((data & ShenandoahBitNative) != 0) {
    data &= ~ShenandoahBitNative;
    os->print("native ");
  }

  if ((data & ShenandoahBitNotNull) != 0) {
    data &= ~ShenandoahBitNotNull;
    os->print("not-null ");
  }

  if ((data & ShenandoahBitElided) != 0) {
    data &= ~ShenandoahBitElided;
    os->print("elided ");
  }

  os->cr();

  if (data > 0) {
    fatal("Unknown bit!");
  }

  os->print_cr(" GC configuration: %sLRB %sSATB %sCAS %sClone %sCard",
               (ShenandoahLoadRefBarrier ? "+" : "-"),
               (ShenandoahSATBBarrier ? "+" : "-"),
               (ShenandoahCASBarrier ? "+" : "-"),
               (ShenandoahCloneBarrier ? "+" : "-"),
               (ShenandoahCardBarrier ? "+" : "-"));
}

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barrier_assert(bool cond, const char* msg, uint8_t bd, Node* n) {
  if (!cond) {
    stringStream ss;
    ss.print_cr("%s", msg);
    ss.print_cr("-----------------");
    print_barrier_data(&ss, bd);
    ss.print_cr("-----------------");
    n->dump_bfs(1, nullptr, "", &ss);
    report_vm_error(__FILE__, __LINE__, ss.as_string());
  }
}

void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (!ShenandoahVerifyOptoBarriers) {
    return;
  }

  // Optimizations might have removed the remaining auxiliary flags, making some accesses completely blank.
  bool accept_blank = (phase == BeforeCodeGen);
  bool expect_load_barriers = !accept_blank && ShenandoahLoadRefBarrier;
  bool expect_store_barriers = !accept_blank && (ShenandoahSATBBarrier || ShenandoahCardBarrier);
  bool expect_load_store_barriers = !accept_blank && ShenandoahCASBarrier;

  Unique_Node_List wq;

  wq.push(compile->root());
  for (uint next = 0; next < wq.size(); next++) {
    Node* n = wq.at(next);
    int opc = n->Opcode();

    uint8_t bd = 0;
    const TypePtr* adr_type = nullptr;
    if (is_Load(opc)) {
      bd = n->as_Load()->barrier_data();
      adr_type = n->as_Load()->adr_type();
    } else if (is_Store(opc)) {
      bd = n->as_Store()->barrier_data();
      adr_type = n->as_Store()->adr_type();
    } else if (is_LoadStore(opc)) {
      bd = n->as_LoadStore()->barrier_data();
      adr_type = n->as_LoadStore()->adr_type();
    } else if (n->is_Mem()) {
      bd = MemNode::barrier_data(n);
      verify_gc_barrier_assert(bd == 0, "Other mem nodes should have no barrier data", bd, n);
    }

    bool is_weak = (bd & (ShenandoahBitWeak | ShenandoahBitPhantom)) != 0;
    bool is_native = (bd & ShenandoahBitNative) != 0;

    bool is_referent = adr_type != nullptr &&
                       adr_type->isa_instptr() &&
                       adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                       adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset();

    bool is_oop_addr = (adr_type != nullptr) && (adr_type->isa_oopptr() || adr_type->isa_narrowoop());
    bool is_raw_addr = (adr_type != nullptr) && (adr_type->isa_rawptr() || adr_type->isa_klassptr());

    if (is_oop_addr) {
      if (is_Load(opc)) {
        verify_gc_barrier_assert(!expect_load_barriers || (bd != 0), "Oop load should have barrier data", bd, n);
        verify_gc_barrier_assert(!is_weak || is_referent, "Weak load only for Reference.referent", bd, n);
      } else if (is_Store(opc)) {
        // Reference.referent stores are allowed to have no barriers.
        verify_gc_barrier_assert(!expect_store_barriers || is_referent || (bd != 0), "Oop store should have barrier data", bd, n);
      } else if (is_LoadStore(opc)) {
        verify_gc_barrier_assert(!expect_load_store_barriers || (bd != 0), "Oop load-store should have barrier data", bd, n);
      }
    } else if (is_raw_addr) {
      if (is_native) {
        if (is_Load(opc)) {
          verify_gc_barrier_assert(!expect_load_barriers || (bd != 0), "Native oop load should have barrier data", bd, n);
        }
        if (is_Store(opc)) {
          verify_gc_barrier_assert(!expect_store_barriers || (bd != 0), "Native oop store should have barrier data", bd, n);
        }
        if (is_LoadStore(opc)) {
          verify_gc_barrier_assert(!expect_load_store_barriers || (bd != 0), "Native oop load-store should have barrier data", bd, n);
        }
      } else {
        // Some Loads/Stores are used for T_ADDRESS and/or raw accesses, which are not
        // supposed to have barriers. Some other Loads/Stores are emitted for real oops,
        // but on raw addresses via Unsafe. The distinction is lost at this level, so we
        // cannot really verify these.
      }
    } else {
      if (is_Load(opc) || is_Store(opc) || is_LoadStore(opc)) {
        verify_gc_barrier_assert(false, "Unclassified access type", bd, n);
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      wq.push(m);
    }
  }
}
#endif

static ShenandoahBarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

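// Stubs are only created during final code emission (see register_stub below),
// so there is nothing to estimate up front.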
int ShenandoahBarrierSetC2::estimate_stub_size() const {
  GrowableArray<ShenandoahBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  assert(stubs->is_empty(), "Lifecycle: no stubs were yet created");
  return 0;
}

void ShenandoahBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);

  PhaseOutput* const output = Compile::current()->output();
  assert(masm.offset() <= output->buffer_sizing_data()->_code,
         "Stubs are assumed to be emitted directly after code and code_size is a hard limit on where it can start");
  barrier_set_state()->set_stubs_start_offset(masm.offset());

  // Stub generation should count all stub code as skipped instructions
  // for the sake of the inlining policy. This is critical for performance,
  // so verify it here.
#ifdef ASSERT
  int offset_before = masm.offset();
  int skipped_before = cb.total_skipped_instructions_size();
#endif

  GrowableArray<ShenandoahBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    stubs->at(i)->emit_code(masm);
  }

#ifdef ASSERT
  int offset_after = masm.offset();
  int skipped_after = cb.total_skipped_instructions_size();
  assert(offset_after - offset_before == skipped_after - skipped_before,
         "All stubs are counted as skipped. masm: %d - %d = %d, cb: %d - %d = %d",
         offset_after, offset_before, offset_after - offset_before,
         skipped_after, skipped_before, skipped_after - skipped_before);
#endif

  masm.flush();
}

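// Registers a stub for emission after the main code. Stubs created during
// scratch emission (size measurement) runs are intentionally not recorded.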
void ShenandoahBarrierStubC2::register_stub(ShenandoahBarrierStubC2* stub) {
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(stub);
  }
}

ShenandoahBarrierStubC2* ShenandoahBarrierStubC2::create(const MachNode* node, Register obj, Address addr, bool narrow, bool do_load) {
  auto* stub = new (Compile::current()->comp_arena()) ShenandoahBarrierStubC2(node, obj, addr, narrow, do_load);
  ShenandoahBarrierStubC2::register_stub(stub);
  return stub;
}

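// Returns true if the given register is live across the stub, according to
// the stub's preserve set.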
bool ShenandoahBarrierStubC2::is_live_register(Register reg) {
  return preserve_set().member(OptoReg::as_OptoReg(reg->as_VMReg()));
}

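// Selects a temporary register for the stub, preferring one that is not live
// across it. Registers used by the stub inputs (obj, address base/index) and
// skip_reg1 are never selected. selected_live tells the caller whether the
// chosen register is live and therefore needs to be saved and restored.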
Register ShenandoahBarrierStubC2::select_temp_register(bool& selected_live, Register skip_reg1) {
  Register tmp = noreg;
  Register fallback_live = noreg;

  // Try to select a non-live register first:
  for (int i = 0; i < available_gp_registers(); i++) {
    Register r = as_Register(i);
    if (r != _obj && r != _addr.base() && r != _addr.index() &&
        r != skip_reg1 && !is_special_register(r)) {
      if (!is_live_register(r)) {
        tmp = r;
        break;
      } else if (fallback_live == noreg) {
        fallback_live = r;
      }
    }
  }

  // If we could not find a non-live register, select the live fallback:
  if (tmp == noreg) {
    tmp = fallback_live;
    selected_live = true;
  } else {
    selected_live = false;
  }

  assert(tmp != noreg, "successfully selected");
  assert_different_registers(tmp, skip_reg1);
  assert_different_registers(tmp, _obj);
  assert_different_registers(tmp, _addr.base());
  assert_different_registers(tmp, _addr.index());
  return tmp;
}

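// Runtime entry for the keep-alive (SATB pre-write) barrier slow path; the
// narrow variant is used for compressed oop accesses.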
address ShenandoahBarrierStubC2::keepalive_runtime_entry_addr() {
  if (_narrow) {
    return CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_narrow);
  } else {
    return CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre);
  }
}

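// Selects the load-reference-barrier runtime entry that matches the barrier
// strength (strong/weak/phantom) recorded on the node and the oop encoding.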
address ShenandoahBarrierStubC2::lrb_runtime_entry_addr() {
  bool is_strong = (_node->barrier_data() & ShenandoahBitStrong) != 0;
  bool is_weak = (_node->barrier_data() & ShenandoahBitWeak) != 0;
  bool is_phantom = (_node->barrier_data() & ShenandoahBitPhantom) != 0;

  if (_narrow) {
    if (is_strong) {
      return CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else if (is_weak) {
      return CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else if (is_phantom) {
      return CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
    }
  } else {
    if (is_strong) {
      return CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    } else if (is_weak) {
      return CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    } else if (is_phantom) {
      return CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
    }
  }

  ShouldNotReachHere();
  return nullptr;
}

bool ShenandoahBarrierSetC2State::needs_liveness_data(const MachNode* mach) const {
  // Nodes that require slow-path stubs need liveness data.
  return ShenandoahBarrierStubC2::needs_slow_barrier(mach);
}

bool ShenandoahBarrierSetC2State::needs_livein_data() const {
  return true;
}