1 /*
2 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahForwarding.hpp"
30 #include "gc/shenandoah/shenandoahHeap.hpp"
31 #include "gc/shenandoah/shenandoahRuntime.hpp"
32 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
33 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
34 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
35 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
36 #include "opto/arraycopynode.hpp"
37 #include "opto/escape.hpp"
38 #include "opto/graphKit.hpp"
39 #include "opto/idealKit.hpp"
40 #include "opto/macro.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/narrowptrnode.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45
46 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
47 return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
48 }
49
// Per-compilation bookkeeping for Shenandoah barrier nodes. Both lists are
// arena-allocated in the compilation arena, so they live exactly as long as
// the compilation itself and need no explicit cleanup.
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}
54
55 int ShenandoahBarrierSetC2State::iu_barriers_count() const {
223
224 Node* no_base = __ top();
225 Node* zero = __ ConI(0);
226 Node* zeroX = __ ConX(0);
227
228 float likely = PROB_LIKELY(0.999);
229 float unlikely = PROB_UNLIKELY(0.999);
230
231 // Offsets into the thread
232 const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
233 const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
234
235 // Now the actual pointers into the thread
236 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
237 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
238
239 // Now some of the values
240 Node* marking;
241 Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
242 Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
243 marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
244 assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");
245
246 // if (!marking)
247 __ if_then(marking, BoolTest::ne, zero, unlikely); {
248 BasicType index_bt = TypeX_X->basic_type();
249 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
250 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
251
252 if (do_load) {
253 // load original value
254 // alias_idx correct??
255 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
256 }
257
258 // if (pre_val != nullptr)
259 __ if_then(pre_val, BoolTest::ne, kit->null()); {
260 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
261
262 // is the queue for this thread full?
263 __ if_then(index, BoolTest::ne, zeroX, likely); {
304 address entry_point = call->as_CallLeaf()->entry_point();
305 return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong)) ||
306 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)) ||
307 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)) ||
308 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow)) ||
309 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
310 }
311
312 bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
313 if (n->Opcode() != Op_If) {
314 return false;
315 }
316
317 Node* bol = n->in(1);
318 assert(bol->is_Bool(), "");
319 Node* cmpx = bol->in(1);
320 if (bol->as_Bool()->_test._test == BoolTest::ne &&
321 cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
322 is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
323 cmpx->in(1)->in(2)->is_Con() &&
324 cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
325 return true;
326 }
327
328 return false;
329 }
330
331 bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
332 if (!n->is_Load()) return false;
333 const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
334 return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
335 && n->in(2)->in(3)->is_Con()
336 && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
337 }
338
339 void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
340 bool do_load,
341 Node* obj,
342 Node* adr,
343 uint alias_idx,
344 Node* val,
433 // Use the pre-barrier to record the value in the referent field
434 satb_write_barrier_pre(kit, false /* do_load */,
435 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
436 pre_val /* pre_val */,
437 T_OBJECT);
438 if (need_mem_bar) {
439 // Add memory barrier to prevent commoning reads from this field
440 // across safepoint since GC can change its value.
441 kit->insert_mem_bar(Op_MemBarCPUOrder);
442 }
443 // Update IdealKit from graphKit.
444 __ sync_kit(kit);
445
446 } __ end_if(); // _ref_type != ref_none
447 } __ end_if(); // offset == referent_offset
448
449 // Final sync IdealKit and GraphKit.
450 kit->final_sync(ideal);
451 }
452
453 #undef __
454
455 const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
456 const Type **fields = TypeTuple::fields(2);
457 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
458 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
459 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
460
461 // create result type (range)
462 fields = TypeTuple::fields(0);
463 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
464
465 return TypeFunc::make(domain, range);
466 }
467
468 const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
469 const Type **fields = TypeTuple::fields(1);
470 fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
471 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
472
496 DecoratorSet decorators = access.decorators();
497
498 const TypePtr* adr_type = access.addr().type();
499 Node* adr = access.addr().node();
500
501 if (!access.is_oop()) {
502 return BarrierSetC2::store_at_resolved(access, val);
503 }
504
505 if (access.is_parse_access()) {
506 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
507 GraphKit* kit = parse_access.kit();
508
509 uint adr_idx = kit->C->get_alias_index(adr_type);
510 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
511 Node* value = val.node();
512 value = shenandoah_iu_barrier(kit, value);
513 val.set_node(value);
514 shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
515 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
516 } else {
517 assert(access.is_opt_access(), "only for optimization passes");
518 assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
519 C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
520 PhaseGVN& gvn = opt_access.gvn();
521
522 if (ShenandoahIUBarrier) {
523 Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
524 val.set_node(enqueue);
525 }
526 }
527 return BarrierSetC2::store_at_resolved(access, val);
528 }
529
530 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
531 // 1: non-reference load, no additional barrier is needed
532 if (!access.is_oop()) {
533 return BarrierSetC2::load_at_resolved(access, val_type);;
534 }
535
536 Node* load = BarrierSetC2::load_at_resolved(access, val_type);
537 DecoratorSet decorators = access.decorators();
538 BasicType type = access.type();
539
540 // 2: apply LRB if needed
541 if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
542 load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
543 if (access.is_parse_access()) {
544 load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
545 } else {
546 load = static_cast<C2OptAccess &>(access).gvn().transform(load);
547 }
578
579 if (on_weak_ref) {
580 // Use the pre-barrier to record the value in the referent field
581 satb_write_barrier_pre(kit, false /* do_load */,
582 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
583 load /* pre_val */, T_OBJECT);
584 // Add memory barrier to prevent commoning reads from this field
585 // across safepoint since GC can change its value.
586 kit->insert_mem_bar(Op_MemBarCPUOrder);
587 } else if (unknown) {
588 // We do not require a mem bar inside pre_barrier if need_mem_bar
589 // is set: the barriers would be emitted by us.
590 insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
591 }
592 }
593
594 return load;
595 }
596
597 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
598 Node* new_val, const Type* value_type) const {
599 GraphKit* kit = access.kit();
600 if (access.is_oop()) {
601 new_val = shenandoah_iu_barrier(kit, new_val);
602 shenandoah_write_barrier_pre(kit, false /* do_load */,
603 nullptr, nullptr, max_juint, nullptr, nullptr,
604 expected_val /* pre_val */, T_OBJECT);
605
606 MemNode::MemOrd mo = access.mem_node_mo();
607 Node* mem = access.memory();
608 Node* adr = access.addr().node();
609 const TypePtr* adr_type = access.addr().type();
610 Node* load_store = nullptr;
611
612 #ifdef _LP64
613 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
614 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
615 Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
616 if (ShenandoahCASBarrier) {
617 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
618 } else {
620 }
621 } else
622 #endif
623 {
624 if (ShenandoahCASBarrier) {
625 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
626 } else {
627 load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
628 }
629 }
630
631 access.set_raw_access(load_store);
632 pin_atomic_op(access);
633
634 #ifdef _LP64
635 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
636 load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
637 }
638 #endif
639 load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
640 return load_store;
641 }
642 return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
643 }
644
645 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
646 Node* new_val, const Type* value_type) const {
647 GraphKit* kit = access.kit();
648 if (access.is_oop()) {
649 new_val = shenandoah_iu_barrier(kit, new_val);
650 shenandoah_write_barrier_pre(kit, false /* do_load */,
651 nullptr, nullptr, max_juint, nullptr, nullptr,
652 expected_val /* pre_val */, T_OBJECT);
653 DecoratorSet decorators = access.decorators();
654 MemNode::MemOrd mo = access.mem_node_mo();
655 Node* mem = access.memory();
656 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
657 Node* load_store = nullptr;
658 Node* adr = access.addr().node();
659 #ifdef _LP64
675 }
676 } else
677 #endif
678 {
679 if (ShenandoahCASBarrier) {
680 if (is_weak_cas) {
681 load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
682 } else {
683 load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
684 }
685 } else {
686 if (is_weak_cas) {
687 load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
688 } else {
689 load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
690 }
691 }
692 }
693 access.set_raw_access(load_store);
694 pin_atomic_op(access);
695 return load_store;
696 }
697 return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
698 }
699
700 Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
701 GraphKit* kit = access.kit();
702 if (access.is_oop()) {
703 val = shenandoah_iu_barrier(kit, val);
704 }
705 Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
706 if (access.is_oop()) {
707 result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
708 shenandoah_write_barrier_pre(kit, false /* do_load */,
709 nullptr, nullptr, max_juint, nullptr, nullptr,
710 result /* pre_val */, T_OBJECT);
711 }
712 return result;
713 }
714
715
// A node belongs to the GC pre-barrier iff it is the runtime call emitted for
// the SATB write-barrier slow path.
bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
  return is_shenandoah_wb_pre_call(node);
}
719
720 // Support for GC barriers emitted during parsing
721 bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
722 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier || node->Opcode() == Op_ShenandoahIUBarrier) return true;
723 if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
724 return false;
725 }
726 CallLeafNode *call = node->as_CallLeaf();
727 if (call->_name == nullptr) {
728 return false;
729 }
730
821
822 if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
823 // Check if heap is has forwarded objects. If it does, we need to call into the special
824 // routine that would fix up source references before we can continue.
825
826 enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
827 Node* region = new RegionNode(PATH_LIMIT);
828 Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);
829
830 Node* thread = phase->transform_later(new ThreadLocalNode());
831 Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
832 Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));
833
834 uint gc_state_idx = Compile::AliasIdxRaw;
835 const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
836 debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
837
838 Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
839 int flags = ShenandoahHeap::HAS_FORWARDED;
840 if (ShenandoahIUBarrier) {
841 flags |= ShenandoahHeap::MARKING;
842 }
843 Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(flags)));
844 Node* stable_cmp = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
845 Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));
846
847 IfNode* stable_iff = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
848 Node* stable_ctrl = phase->transform_later(new IfFalseNode(stable_iff));
849 Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));
850
851 // Heap is stable, no need to do anything additional
852 region->init_req(_heap_stable, stable_ctrl);
853 mem_phi->init_req(_heap_stable, mem);
854
855 // Heap is unstable, call into clone barrier stub
856 Node* call = phase->make_leaf_call(unstable_ctrl, mem,
857 ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
858 CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
859 "shenandoah_clone",
860 TypeRawPtr::BOTTOM,
861 src_base);
889
890 // Support for macro expanded GC barriers
891 void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
892 if (node->Opcode() == Op_ShenandoahIUBarrier) {
893 state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
894 }
895 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
896 state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
897 }
898 }
899
900 void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
901 if (node->Opcode() == Op_ShenandoahIUBarrier) {
902 state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
903 }
904 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
905 state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
906 }
907 }
908
909 void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
910 if (is_shenandoah_wb_pre_call(n)) {
911 shenandoah_eliminate_wb_pre(n, ¯o->igvn());
912 }
913 }
914
915 void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
916 assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
917 Node* c = call->as_Call()->proj_out(TypeFunc::Control);
918 c = c->unique_ctrl_out();
919 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
920 c = c->unique_ctrl_out();
921 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
922 Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
923 assert(iff->is_If(), "expect test");
924 if (!is_shenandoah_marking_if(igvn, iff)) {
925 c = c->unique_ctrl_out();
926 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
927 iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
928 assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
929 }
930 Node* cmpx = iff->in(1)->in(1);
931 igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
|
1 /*
2 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
30 #include "gc/shenandoah/shenandoahForwarding.hpp"
31 #include "gc/shenandoah/shenandoahHeap.hpp"
32 #include "gc/shenandoah/shenandoahRuntime.hpp"
33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
35 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
36 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
37 #include "gc/shenandoah/mode/shenandoahMode.hpp"
38 #include "opto/arraycopynode.hpp"
39 #include "opto/escape.hpp"
40 #include "opto/graphKit.hpp"
41 #include "opto/idealKit.hpp"
42 #include "opto/macro.hpp"
43 #include "opto/movenode.hpp"
44 #include "opto/narrowptrnode.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47
48 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
49 return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
50 }
51
// Per-compilation bookkeeping for Shenandoah barrier nodes. Both lists are
// arena-allocated in the compilation arena, so they live exactly as long as
// the compilation itself and need no explicit cleanup.
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}
56
57 int ShenandoahBarrierSetC2State::iu_barriers_count() const {
225
226 Node* no_base = __ top();
227 Node* zero = __ ConI(0);
228 Node* zeroX = __ ConX(0);
229
230 float likely = PROB_LIKELY(0.999);
231 float unlikely = PROB_UNLIKELY(0.999);
232
233 // Offsets into the thread
234 const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
235 const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
236
237 // Now the actual pointers into the thread
238 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
239 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
240
241 // Now some of the values
242 Node* marking;
243 Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
244 Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
245 marking = __ AndI(ld, __ ConI(ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING));
246 assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");
247
248 // if (!marking)
249 __ if_then(marking, BoolTest::ne, zero, unlikely); {
250 BasicType index_bt = TypeX_X->basic_type();
251 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
252 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
253
254 if (do_load) {
255 // load original value
256 // alias_idx correct??
257 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
258 }
259
260 // if (pre_val != nullptr)
261 __ if_then(pre_val, BoolTest::ne, kit->null()); {
262 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
263
264 // is the queue for this thread full?
265 __ if_then(index, BoolTest::ne, zeroX, likely); {
306 address entry_point = call->as_CallLeaf()->entry_point();
307 return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong)) ||
308 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)) ||
309 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)) ||
310 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow)) ||
311 (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
312 }
313
314 bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
315 if (n->Opcode() != Op_If) {
316 return false;
317 }
318
319 Node* bol = n->in(1);
320 assert(bol->is_Bool(), "");
321 Node* cmpx = bol->in(1);
322 if (bol->as_Bool()->_test._test == BoolTest::ne &&
323 cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
324 is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
325 cmpx->in(1)->in(2)->is_Con() &&
326 cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING)) {
327 return true;
328 }
329
330 return false;
331 }
332
333 bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
334 if (!n->is_Load()) return false;
335 const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
336 return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
337 && n->in(2)->in(3)->is_Con()
338 && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
339 }
340
341 void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
342 bool do_load,
343 Node* obj,
344 Node* adr,
345 uint alias_idx,
346 Node* val,
435 // Use the pre-barrier to record the value in the referent field
436 satb_write_barrier_pre(kit, false /* do_load */,
437 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
438 pre_val /* pre_val */,
439 T_OBJECT);
440 if (need_mem_bar) {
441 // Add memory barrier to prevent commoning reads from this field
442 // across safepoint since GC can change its value.
443 kit->insert_mem_bar(Op_MemBarCPUOrder);
444 }
445 // Update IdealKit from graphKit.
446 __ sync_kit(kit);
447
448 } __ end_if(); // _ref_type != ref_none
449 } __ end_if(); // offset == referent_offset
450
451 // Final sync IdealKit and GraphKit.
452 kit->final_sync(ideal);
453 }
454
455 Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
456 BarrierSet* bs = BarrierSet::barrier_set();
457 ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
458 CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base();
459 if (card_table_base != nullptr) {
460 return kit->makecon(TypeRawPtr::make((address)card_table_base));
461 } else {
462 return kit->null();
463 }
464 }
465
// Generational Shenandoah only: emit the card-marking ("post") barrier for an
// oop store, dirtying the card that covers 'adr' (or the object base when
// !use_precise) so the remembered-set scan later picks up the store.
// Skips the mark when it is provably unnecessary (null store, freshly
// allocated object under ReduceInitialCardMarks).
void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
                                          Node* ctl,
                                          Node* oop_store,
                                          Node* obj,
                                          Node* adr,
                                          uint adr_idx,
                                          Node* val,
                                          BasicType bt,
                                          bool use_precise) const {
  // Non-generational modes have no remembered set, hence no card marks.
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    return;
  }

  // No store check needed if we're storing a null.
  if (val != nullptr && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      return;
  }

  if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != nullptr, "");

  IdealKit ideal(kit, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node* zero = __ ConI(0); // Dirty card value

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table. Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores. In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
540
541 #undef __
542
543 const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
544 const Type **fields = TypeTuple::fields(2);
545 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
546 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
547 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
548
549 // create result type (range)
550 fields = TypeTuple::fields(0);
551 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
552
553 return TypeFunc::make(domain, range);
554 }
555
556 const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
557 const Type **fields = TypeTuple::fields(1);
558 fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
559 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
560
584 DecoratorSet decorators = access.decorators();
585
586 const TypePtr* adr_type = access.addr().type();
587 Node* adr = access.addr().node();
588
589 if (!access.is_oop()) {
590 return BarrierSetC2::store_at_resolved(access, val);
591 }
592
593 if (access.is_parse_access()) {
594 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
595 GraphKit* kit = parse_access.kit();
596
597 uint adr_idx = kit->C->get_alias_index(adr_type);
598 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
599 Node* value = val.node();
600 value = shenandoah_iu_barrier(kit, value);
601 val.set_node(value);
602 shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
603 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
604
605 Node* result = BarrierSetC2::store_at_resolved(access, val);
606
607 bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
608 bool is_array = (decorators & IS_ARRAY) != 0;
609 bool use_precise = is_array || anonymous;
610 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
611 adr, adr_idx, val.node(), access.type(), use_precise);
612 return result;
613 } else {
614 assert(access.is_opt_access(), "only for optimization passes");
615 assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
616 C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
617 PhaseGVN& gvn = opt_access.gvn();
618
619 if (ShenandoahIUBarrier) {
620 Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
621 val.set_node(enqueue);
622 }
623 return BarrierSetC2::store_at_resolved(access, val);
624 }
625 }
626
// Emit a Shenandoah-instrumented load at a resolved address.
// Non-oop loads take no extra barrier; oop loads get a load-reference
// barrier (LRB) when required by the access decorators/type, and (in the
// elided portion below) weak-reference loads additionally record the loaded
// value via the SATB pre-barrier.
627 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
628 // 1: non-reference load, no additional barrier is needed
629 if (!access.is_oop()) {
// NOTE(review): stray double semicolon at end of this line — harmless, but
// worth cleaning up when this region is next touched.
630 return BarrierSetC2::load_at_resolved(access, val_type);;
631 }
632
633 Node* load = BarrierSetC2::load_at_resolved(access, val_type);
634 DecoratorSet decorators = access.decorators();
635 BasicType type = access.type();
636
637 // 2: apply LRB if needed
638 if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
639 load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
// Transform through the GVN of whichever phase issued this access:
// parse-time accesses go through the GraphKit's GVN, optimization-pass
// accesses through the C2OptAccess GVN.
640 if (access.is_parse_access()) {
641 load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
642 } else {
643 load = static_cast<C2OptAccess &>(access).gvn().transform(load);
644 }
// NOTE(review): original lines 645-674 are elided from this listing; the
// variables used below (kit, on_weak_ref, unknown, obj, offset,
// need_cpu_mem_bar) are declared in that elided region — confirm against
// the full source before editing this function.
675
676 if (on_weak_ref) {
677 // Use the pre-barrier to record the value in the referent field
678 satb_write_barrier_pre(kit, false /* do_load */,
679 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
680 load /* pre_val */, T_OBJECT);
681 // Add memory barrier to prevent commoning reads from this field
682 // across safepoint since GC can change its value.
683 kit->insert_mem_bar(Op_MemBarCPUOrder);
684 } else if (unknown) {
685 // We do not require a mem bar inside pre_barrier if need_mem_bar
686 // is set: the barriers would be emitted by us.
687 insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
688 }
689 }
690
691 return load;
692 }
693
// Emit a Shenandoah-instrumented compare-and-exchange that returns the old
// value. For oop CAS: the new value goes through the IU barrier, the expected
// value is recorded by the SATB pre-barrier (as pre_val), the CAS itself uses
// the Shenandoah CAS node when ShenandoahCASBarrier is on (handles
// false-negative compares against forwarded objects), the returned old value
// is healed by a load-reference barrier, and a post barrier covers the store.
// Non-oop accesses delegate to the generic BarrierSetC2 implementation.
694 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
695 Node* new_val, const Type* value_type) const {
696 GraphKit* kit = access.kit();
697 if (access.is_oop()) {
698 new_val = shenandoah_iu_barrier(kit, new_val);
699 shenandoah_write_barrier_pre(kit, false /* do_load */,
700 nullptr, nullptr, max_juint, nullptr, nullptr,
701 expected_val /* pre_val */, T_OBJECT);
702
703 MemNode::MemOrd mo = access.mem_node_mo();
704 Node* mem = access.memory();
705 Node* adr = access.addr().node();
706 const TypePtr* adr_type = access.addr().type();
707 Node* load_store = nullptr;
708
// Compressed-oop path: encode both operands to narrow oops before the CAS.
709 #ifdef _LP64
710 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
711 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
712 Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
713 if (ShenandoahCASBarrier) {
714 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
715 } else {
// NOTE(review): original line 716 (the plain CompareAndExchangeNNode
// fallback for the !ShenandoahCASBarrier case) is elided from this listing.
717 }
718 } else
719 #endif
720 {
721 if (ShenandoahCASBarrier) {
722 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
723 } else {
724 load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
725 }
726 }
727
728 access.set_raw_access(load_store);
729 pin_atomic_op(access);
730
// Decode the narrow-oop result back to a full pointer before applying the LRB.
731 #ifdef _LP64
732 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
733 load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
734 }
735 #endif
736 load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
737 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
738 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
739 return load_store;
740 }
741 return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
742 }
743
// Emit a Shenandoah-instrumented compare-and-swap that returns a boolean.
// Oop CAS: IU barrier on the new value, SATB pre-barrier recording the
// expected value, then a (weak or strong) Shenandoah CAS node when
// ShenandoahCASBarrier is on — else the plain CAS node — followed by the
// post barrier. No LRB is needed: the result is a boolean, not an oop.
// Non-oop accesses delegate to the generic BarrierSetC2 implementation.
744 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
745 Node* new_val, const Type* value_type) const {
746 GraphKit* kit = access.kit();
747 if (access.is_oop()) {
748 new_val = shenandoah_iu_barrier(kit, new_val);
749 shenandoah_write_barrier_pre(kit, false /* do_load */,
750 nullptr, nullptr, max_juint, nullptr, nullptr,
751 expected_val /* pre_val */, T_OBJECT);
752 DecoratorSet decorators = access.decorators();
753 MemNode::MemOrd mo = access.mem_node_mo();
754 Node* mem = access.memory();
755 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
756 Node* load_store = nullptr;
757 Node* adr = access.addr().node();
// NOTE(review): original lines 759-773 (the _LP64 compressed-oop branch,
// mirroring the structure of the uncompressed branch below) are elided
// from this listing.
758 #ifdef _LP64
774 }
775 } else
776 #endif
777 {
778 if (ShenandoahCASBarrier) {
779 if (is_weak_cas) {
780 load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
781 } else {
782 load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
783 }
784 } else {
785 if (is_weak_cas) {
786 load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
787 } else {
788 load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
789 }
790 }
791 }
792 access.set_raw_access(load_store);
793 pin_atomic_op(access);
794 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
795 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
796 return load_store;
797 }
798 return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
799 }
800
// Emit a Shenandoah-instrumented atomic exchange (swap) at a resolved address.
// For oop swaps the barrier ordering is significant:
//   1. IU barrier on the value being installed (before the swap),
//   2. the swap itself (generic BarrierSetC2 emission),
//   3. load-reference barrier on the returned old value,
//   4. SATB pre-barrier recording that old value as pre_val,
//   5. post barrier covering the store.
// Non-oop swaps get only step 2.
801 Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
802 GraphKit* kit = access.kit();
803 if (access.is_oop()) {
804 val = shenandoah_iu_barrier(kit, val);
805 }
806 Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
807 if (access.is_oop()) {
// Heal the previous value read by the swap before it is handed to callers
// or recorded by the pre-barrier below.
808 result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
809 shenandoah_write_barrier_pre(kit, false /* do_load */,
810 nullptr, nullptr, max_juint, nullptr, nullptr,
811 result /* pre_val */, T_OBJECT);
812 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
813 access.addr().node(), access.alias_idx(), val, T_OBJECT, true);
814 }
815 return result;
816 }
817
818
// A node is a GC pre-barrier node iff it is a call to the Shenandoah
// SATB write-barrier-pre runtime stub.
819 bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
820 return is_shenandoah_wb_pre_call(node);
821 }
822
823 // Support for GC barriers emitted during parsing
824 bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
825 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier || node->Opcode() == Op_ShenandoahIUBarrier) return true;
826 if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
827 return false;
828 }
829 CallLeafNode *call = node->as_CallLeaf();
830 if (call->_name == nullptr) {
831 return false;
832 }
833
924
925 if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
926 // Check if heap is has forwarded objects. If it does, we need to call into the special
927 // routine that would fix up source references before we can continue.
928
929 enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
930 Node* region = new RegionNode(PATH_LIMIT);
931 Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);
932
933 Node* thread = phase->transform_later(new ThreadLocalNode());
934 Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
935 Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));
936
937 uint gc_state_idx = Compile::AliasIdxRaw;
938 const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
939 debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
940
941 Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
942 int flags = ShenandoahHeap::HAS_FORWARDED;
943 if (ShenandoahIUBarrier) {
944 flags |= ShenandoahHeap::YOUNG_MARKING;
945 }
946 Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(flags)));
947 Node* stable_cmp = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
948 Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));
949
950 IfNode* stable_iff = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
951 Node* stable_ctrl = phase->transform_later(new IfFalseNode(stable_iff));
952 Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));
953
954 // Heap is stable, no need to do anything additional
955 region->init_req(_heap_stable, stable_ctrl);
956 mem_phi->init_req(_heap_stable, mem);
957
958 // Heap is unstable, call into clone barrier stub
959 Node* call = phase->make_leaf_call(unstable_ctrl, mem,
960 ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
961 CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
962 "shenandoah_clone",
963 TypeRawPtr::BOTTOM,
964 src_base);
992
993 // Support for macro expanded GC barriers
// Record newly-created Shenandoah barrier nodes in the per-compilation
// barrier-set state so they can be found (and expanded/eliminated) later.
// Only IU barriers and load-reference barriers are tracked here.
994 void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
995 if (node->Opcode() == Op_ShenandoahIUBarrier) {
996 state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
997 }
998 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
999 state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
1000 }
1001 }
1002
// Inverse of register_potential_barrier_node: drop a dying barrier node
// from the per-compilation barrier-set state.
1003 void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
1004 if (node->Opcode() == Op_ShenandoahIUBarrier) {
1005 state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
1006 }
1007 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1008 state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
1009 }
1010 }
1011
// Eliminate a GC barrier that macro expansion has proven unnecessary
// (e.g. around a non-escaping allocation).
// - A Shenandoah SATB pre-barrier call is stripped via
//   shenandoah_eliminate_wb_pre().
// - In generational mode, a CastP2X node roots a card-table update
//   (shift -> AddP -> store); the card writes are detached, and with
//   UseCondCardMark the "card already written?" LoadB is folded to
//   constant 0 so the guarding test disappears.
1012 void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
1013 if (is_shenandoah_wb_pre_call(node)) {
// BUGFIX: was "¯o->igvn()" — HTML-entity mojibake ("&macro" mangled via
// "&macr;" into "¯o"). shenandoah_eliminate_wb_pre takes a PhaseIterGVN*,
// and "macro" is the parameter used throughout the rest of this body.
1014 shenandoah_eliminate_wb_pre(node, &macro->igvn());
1015 }
1016 if (node->Opcode() == Op_CastP2X && ShenandoahHeap::heap()->mode()->is_generational()) {
// Expected shape: CastP2X -> (card index shift) -> AddP -> card store(s).
1017 Node* shift = node->unique_out();
1018 Node* addp = shift->unique_out();
1019 for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
1020 Node* mem = addp->last_out(j);
1021 if (UseCondCardMark && mem->is_Load()) {
1022 assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
1023 // The load is checking if the card has been written so
1024 // replace it with zero to fold the test.
1025 macro->replace_node(mem, macro->intcon(0));
1026 continue;
1027 }
1028 assert(mem->is_Store(), "store required");
// Bypass the card store: wire its users to its incoming memory state.
1029 macro->replace_node(mem, mem->in(MemNode::Memory));
1030 }
1031 }
1032 }
1033
1034 void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
1035 assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
1036 Node* c = call->as_Call()->proj_out(TypeFunc::Control);
1037 c = c->unique_ctrl_out();
1038 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
1039 c = c->unique_ctrl_out();
1040 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
1041 Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
1042 assert(iff->is_If(), "expect test");
1043 if (!is_shenandoah_marking_if(igvn, iff)) {
1044 c = c->unique_ctrl_out();
1045 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
1046 iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
1047 assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
1048 }
1049 Node* cmpx = iff->in(1)->in(1);
1050 igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
|