1 /*
2 * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahForwarding.hpp"
30 #include "gc/shenandoah/shenandoahHeap.hpp"
31 #include "gc/shenandoah/shenandoahRuntime.hpp"
32 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
33 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
34 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
35 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
36 #include "opto/arraycopynode.hpp"
37 #include "opto/escape.hpp"
38 #include "opto/graphKit.hpp"
39 #include "opto/idealKit.hpp"
40 #include "opto/macro.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/narrowptrnode.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45
46 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
47 return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
48 }
49
// Per-compilation state: arena-allocated lists of the Shenandoah barrier nodes
// created while building the ideal graph, so later phases (expansion,
// verification) can enumerate them without rescanning the whole graph.
// Both arrays start with capacity 8 and grow in the compilation arena.
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}
54
55 int ShenandoahBarrierSetC2State::iu_barriers_count() const {
433 // Use the pre-barrier to record the value in the referent field
434 satb_write_barrier_pre(kit, false /* do_load */,
435 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
436 pre_val /* pre_val */,
437 T_OBJECT);
438 if (need_mem_bar) {
439 // Add memory barrier to prevent commoning reads from this field
440 // across safepoint since GC can change its value.
441 kit->insert_mem_bar(Op_MemBarCPUOrder);
442 }
443 // Update IdealKit from graphKit.
444 __ sync_kit(kit);
445
446 } __ end_if(); // _ref_type != ref_none
447 } __ end_if(); // offset == referent_offset
448
449 // Final sync IdealKit and GraphKit.
450 kit->final_sync(ideal);
451 }
452
453 #undef __
454
455 const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
456 const Type **fields = TypeTuple::fields(2);
457 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
458 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
459 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
460
461 // create result type (range)
462 fields = TypeTuple::fields(0);
463 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
464
465 return TypeFunc::make(domain, range);
466 }
467
468 const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
469 const Type **fields = TypeTuple::fields(1);
470 fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
471 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
472
496 DecoratorSet decorators = access.decorators();
497
498 const TypePtr* adr_type = access.addr().type();
499 Node* adr = access.addr().node();
500
501 if (!access.is_oop()) {
502 return BarrierSetC2::store_at_resolved(access, val);
503 }
504
505 if (access.is_parse_access()) {
506 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
507 GraphKit* kit = parse_access.kit();
508
509 uint adr_idx = kit->C->get_alias_index(adr_type);
510 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
511 Node* value = val.node();
512 value = shenandoah_iu_barrier(kit, value);
513 val.set_node(value);
514 shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
515 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
516 } else {
517 assert(access.is_opt_access(), "only for optimization passes");
518 assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
519 C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
520 PhaseGVN& gvn = opt_access.gvn();
521
522 if (ShenandoahIUBarrier) {
523 Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
524 val.set_node(enqueue);
525 }
526 }
527 return BarrierSetC2::store_at_resolved(access, val);
528 }
529
530 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
531 // 1: non-reference load, no additional barrier is needed
532 if (!access.is_oop()) {
533 return BarrierSetC2::load_at_resolved(access, val_type);
534 }
535
536 Node* load = BarrierSetC2::load_at_resolved(access, val_type);
537 DecoratorSet decorators = access.decorators();
538 BasicType type = access.type();
539
540 // 2: apply LRB if needed
541 if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
542 load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
543 if (access.is_parse_access()) {
544 load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
545 } else {
546 load = static_cast<C2OptAccess &>(access).gvn().transform(load);
547 }
578
579 if (on_weak_ref) {
580 // Use the pre-barrier to record the value in the referent field
581 satb_write_barrier_pre(kit, false /* do_load */,
582 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
583 load /* pre_val */, T_OBJECT);
584 // Add memory barrier to prevent commoning reads from this field
585 // across safepoint since GC can change its value.
586 kit->insert_mem_bar(Op_MemBarCPUOrder);
587 } else if (unknown) {
588 // We do not require a mem bar inside pre_barrier if need_mem_bar
589 // is set: the barriers would be emitted by us.
590 insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
591 }
592 }
593
594 return load;
595 }
596
597 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
598 Node* new_val, const Type* value_type) const {
599 GraphKit* kit = access.kit();
600 if (access.is_oop()) {
601 new_val = shenandoah_iu_barrier(kit, new_val);
602 shenandoah_write_barrier_pre(kit, false /* do_load */,
603 nullptr, nullptr, max_juint, nullptr, nullptr,
604 expected_val /* pre_val */, T_OBJECT);
605
606 MemNode::MemOrd mo = access.mem_node_mo();
607 Node* mem = access.memory();
608 Node* adr = access.addr().node();
609 const TypePtr* adr_type = access.addr().type();
610 Node* load_store = nullptr;
611
612 #ifdef _LP64
613 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
614 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
615 Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
616 if (ShenandoahCASBarrier) {
617 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
618 } else {
620 }
621 } else
622 #endif
623 {
624 if (ShenandoahCASBarrier) {
625 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
626 } else {
627 load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
628 }
629 }
630
631 access.set_raw_access(load_store);
632 pin_atomic_op(access);
633
634 #ifdef _LP64
635 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
636 load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
637 }
638 #endif
639 load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
640 return load_store;
641 }
642 return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
643 }
644
645 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
646 Node* new_val, const Type* value_type) const {
647 GraphKit* kit = access.kit();
648 if (access.is_oop()) {
649 new_val = shenandoah_iu_barrier(kit, new_val);
650 shenandoah_write_barrier_pre(kit, false /* do_load */,
651 nullptr, nullptr, max_juint, nullptr, nullptr,
652 expected_val /* pre_val */, T_OBJECT);
653 DecoratorSet decorators = access.decorators();
654 MemNode::MemOrd mo = access.mem_node_mo();
655 Node* mem = access.memory();
656 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
657 Node* load_store = nullptr;
658 Node* adr = access.addr().node();
659 #ifdef _LP64
675 }
676 } else
677 #endif
678 {
679 if (ShenandoahCASBarrier) {
680 if (is_weak_cas) {
681 load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
682 } else {
683 load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
684 }
685 } else {
686 if (is_weak_cas) {
687 load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
688 } else {
689 load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
690 }
691 }
692 }
693 access.set_raw_access(load_store);
694 pin_atomic_op(access);
695 return load_store;
696 }
697 return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
698 }
699
700 Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
701 GraphKit* kit = access.kit();
702 if (access.is_oop()) {
703 val = shenandoah_iu_barrier(kit, val);
704 }
705 Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
706 if (access.is_oop()) {
707 result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
708 shenandoah_write_barrier_pre(kit, false /* do_load */,
709 nullptr, nullptr, max_juint, nullptr, nullptr,
710 result /* pre_val */, T_OBJECT);
711 }
712 return result;
713 }
714
715
716 bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
717 return is_shenandoah_wb_pre_call(node);
718 }
719
720 // Support for GC barriers emitted during parsing
721 bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
722 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier || node->Opcode() == Op_ShenandoahIUBarrier) return true;
723 if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
724 return false;
725 }
726 CallLeafNode *call = node->as_CallLeaf();
727 if (call->_name == nullptr) {
728 return false;
729 }
730
889
890 // Support for macro expanded GC barriers
891 void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
892 if (node->Opcode() == Op_ShenandoahIUBarrier) {
893 state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
894 }
895 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
896 state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
897 }
898 }
899
900 void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
901 if (node->Opcode() == Op_ShenandoahIUBarrier) {
902 state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
903 }
904 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
905 state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
906 }
907 }
908
909 void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
910 if (is_shenandoah_wb_pre_call(n)) {
911 shenandoah_eliminate_wb_pre(n, ¯o->igvn());
912 }
913 }
914
915 void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
916 assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
917 Node* c = call->as_Call()->proj_out(TypeFunc::Control);
918 c = c->unique_ctrl_out();
919 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
920 c = c->unique_ctrl_out();
921 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
922 Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
923 assert(iff->is_If(), "expect test");
924 if (!is_shenandoah_marking_if(igvn, iff)) {
925 c = c->unique_ctrl_out();
926 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
927 iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
928 assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
929 }
930 Node* cmpx = iff->in(1)->in(1);
931 igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
|
1 /*
2 * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
30 #include "gc/shenandoah/shenandoahForwarding.hpp"
31 #include "gc/shenandoah/shenandoahHeap.hpp"
32 #include "gc/shenandoah/shenandoahRuntime.hpp"
33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
35 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
36 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
37 #include "gc/shenandoah/mode/shenandoahMode.hpp"
38 #include "opto/arraycopynode.hpp"
39 #include "opto/escape.hpp"
40 #include "opto/graphKit.hpp"
41 #include "opto/idealKit.hpp"
42 #include "opto/macro.hpp"
43 #include "opto/movenode.hpp"
44 #include "opto/narrowptrnode.hpp"
45 #include "opto/rootnode.hpp"
46 #include "opto/runtime.hpp"
47
48 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
49 return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
50 }
51
// Per-compilation state: arena-allocated lists of the Shenandoah barrier nodes
// created while building the ideal graph, so later phases (expansion,
// verification) can enumerate them without rescanning the whole graph.
// Both arrays start with capacity 8 and grow in the compilation arena.
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}
56
57 int ShenandoahBarrierSetC2State::iu_barriers_count() const {
435 // Use the pre-barrier to record the value in the referent field
436 satb_write_barrier_pre(kit, false /* do_load */,
437 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
438 pre_val /* pre_val */,
439 T_OBJECT);
440 if (need_mem_bar) {
441 // Add memory barrier to prevent commoning reads from this field
442 // across safepoint since GC can change its value.
443 kit->insert_mem_bar(Op_MemBarCPUOrder);
444 }
445 // Update IdealKit from graphKit.
446 __ sync_kit(kit);
447
448 } __ end_if(); // _ref_type != ref_none
449 } __ end_if(); // offset == referent_offset
450
451 // Final sync IdealKit and GraphKit.
452 kit->final_sync(ideal);
453 }
454
455 Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
456 BarrierSet* bs = BarrierSet::barrier_set();
457 ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
458 CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base();
459 if (card_table_base != nullptr) {
460 return kit->makecon(TypeRawPtr::make((address)card_table_base));
461 } else {
462 return kit->null();
463 }
464 }
465
// Emit the card-marking post-barrier for a reference store: dirty the card
// covering 'adr' (or the start of 'obj' when !use_precise) so a later
// remembered-set scan can find the updated slot. Only valid when
// ShenandoahCardBarrier is enabled.
//
// NOTE(review): 'ctl', 'oop_store' and 'bt' are not used in this body;
// presumably kept for signature parity with other collectors' post_barrier —
// confirm before relying on them.
void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
                                          Node* ctl,
                                          Node* oop_store,
                                          Node* obj,
                                          Node* adr,
                                          uint adr_idx,
                                          Node* val,
                                          BasicType bt,
                                          bool use_precise) const {
  assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");

  // No store check needed if we're storing a null.
  if (val != nullptr && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      return;
  }

  if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != nullptr, "");

  // IdealKit gives structured control flow (if_then/end_if) over the ideal graph.
  IdealKit ideal(kit, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node* zero = __ ConI(0); // Dirty card value

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table. Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores. In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
538
539 #undef __
540
541 const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
542 const Type **fields = TypeTuple::fields(2);
543 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
544 fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
545 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
546
547 // create result type (range)
548 fields = TypeTuple::fields(0);
549 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
550
551 return TypeFunc::make(domain, range);
552 }
553
554 const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
555 const Type **fields = TypeTuple::fields(1);
556 fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
557 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
558
582 DecoratorSet decorators = access.decorators();
583
584 const TypePtr* adr_type = access.addr().type();
585 Node* adr = access.addr().node();
586
587 if (!access.is_oop()) {
588 return BarrierSetC2::store_at_resolved(access, val);
589 }
590
591 if (access.is_parse_access()) {
592 C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
593 GraphKit* kit = parse_access.kit();
594
595 uint adr_idx = kit->C->get_alias_index(adr_type);
596 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
597 Node* value = val.node();
598 value = shenandoah_iu_barrier(kit, value);
599 val.set_node(value);
600 shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
601 static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
602
603 Node* result = BarrierSetC2::store_at_resolved(access, val);
604
605 if (ShenandoahCardBarrier) {
606 const bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
607 const bool is_array = (decorators & IS_ARRAY) != 0;
608 const bool use_precise = is_array || anonymous;
609 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
610 adr, adr_idx, val.node(), access.type(), use_precise);
611 }
612 return result;
613 } else {
614 assert(access.is_opt_access(), "only for optimization passes");
615 assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
616 C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
617 PhaseGVN& gvn = opt_access.gvn();
618
619 if (ShenandoahIUBarrier) {
620 Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node()));
621 val.set_node(enqueue);
622 }
623 return BarrierSetC2::store_at_resolved(access, val);
624 }
625 }
626
627 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
628 // 1: non-reference load, no additional barrier is needed
629 if (!access.is_oop()) {
630 return BarrierSetC2::load_at_resolved(access, val_type);
631 }
632
633 Node* load = BarrierSetC2::load_at_resolved(access, val_type);
634 DecoratorSet decorators = access.decorators();
635 BasicType type = access.type();
636
637 // 2: apply LRB if needed
638 if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
639 load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
640 if (access.is_parse_access()) {
641 load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
642 } else {
643 load = static_cast<C2OptAccess &>(access).gvn().transform(load);
644 }
675
676 if (on_weak_ref) {
677 // Use the pre-barrier to record the value in the referent field
678 satb_write_barrier_pre(kit, false /* do_load */,
679 nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
680 load /* pre_val */, T_OBJECT);
681 // Add memory barrier to prevent commoning reads from this field
682 // across safepoint since GC can change its value.
683 kit->insert_mem_bar(Op_MemBarCPUOrder);
684 } else if (unknown) {
685 // We do not require a mem bar inside pre_barrier if need_mem_bar
686 // is set: the barriers would be emitted by us.
687 insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
688 }
689 }
690
691 return load;
692 }
693
694 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
695 Node* new_val, const Type* value_type) const {
696 GraphKit* kit = access.kit();
697 if (access.is_oop()) {
698 new_val = shenandoah_iu_barrier(kit, new_val);
699 shenandoah_write_barrier_pre(kit, false /* do_load */,
700 nullptr, nullptr, max_juint, nullptr, nullptr,
701 expected_val /* pre_val */, T_OBJECT);
702
703 MemNode::MemOrd mo = access.mem_node_mo();
704 Node* mem = access.memory();
705 Node* adr = access.addr().node();
706 const TypePtr* adr_type = access.addr().type();
707 Node* load_store = nullptr;
708
709 #ifdef _LP64
710 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
711 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
712 Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
713 if (ShenandoahCASBarrier) {
714 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
715 } else {
717 }
718 } else
719 #endif
720 {
721 if (ShenandoahCASBarrier) {
722 load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
723 } else {
724 load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
725 }
726 }
727
728 access.set_raw_access(load_store);
729 pin_atomic_op(access);
730
731 #ifdef _LP64
732 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
733 load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
734 }
735 #endif
736 load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
737 if (ShenandoahCardBarrier) {
738 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
739 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
740 }
741 return load_store;
742 }
743 return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
744 }
745
746 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
747 Node* new_val, const Type* value_type) const {
748 GraphKit* kit = access.kit();
749 if (access.is_oop()) {
750 new_val = shenandoah_iu_barrier(kit, new_val);
751 shenandoah_write_barrier_pre(kit, false /* do_load */,
752 nullptr, nullptr, max_juint, nullptr, nullptr,
753 expected_val /* pre_val */, T_OBJECT);
754 DecoratorSet decorators = access.decorators();
755 MemNode::MemOrd mo = access.mem_node_mo();
756 Node* mem = access.memory();
757 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
758 Node* load_store = nullptr;
759 Node* adr = access.addr().node();
760 #ifdef _LP64
776 }
777 } else
778 #endif
779 {
780 if (ShenandoahCASBarrier) {
781 if (is_weak_cas) {
782 load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
783 } else {
784 load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
785 }
786 } else {
787 if (is_weak_cas) {
788 load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
789 } else {
790 load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
791 }
792 }
793 }
794 access.set_raw_access(load_store);
795 pin_atomic_op(access);
796 if (ShenandoahCardBarrier) {
797 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
798 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
799 }
800 return load_store;
801 }
802 return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
803 }
804
805 Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
806 GraphKit* kit = access.kit();
807 if (access.is_oop()) {
808 val = shenandoah_iu_barrier(kit, val);
809 }
810 Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
811 if (access.is_oop()) {
812 result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
813 shenandoah_write_barrier_pre(kit, false /* do_load */,
814 nullptr, nullptr, max_juint, nullptr, nullptr,
815 result /* pre_val */, T_OBJECT);
816 if (ShenandoahCardBarrier) {
817 post_barrier(kit, kit->control(), access.raw_access(), access.base(),
818 access.addr().node(), access.alias_idx(), val, T_OBJECT, true);
819 }
820 }
821 return result;
822 }
823
824
825 bool ShenandoahBarrierSetC2::is_gc_pre_barrier_node(Node* node) const {
826 return is_shenandoah_wb_pre_call(node);
827 }
828
829 // Support for GC barriers emitted during parsing
830 bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
831 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier || node->Opcode() == Op_ShenandoahIUBarrier) return true;
832 if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
833 return false;
834 }
835 CallLeafNode *call = node->as_CallLeaf();
836 if (call->_name == nullptr) {
837 return false;
838 }
839
998
999 // Support for macro expanded GC barriers
1000 void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
1001 if (node->Opcode() == Op_ShenandoahIUBarrier) {
1002 state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
1003 }
1004 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1005 state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
1006 }
1007 }
1008
1009 void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
1010 if (node->Opcode() == Op_ShenandoahIUBarrier) {
1011 state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
1012 }
1013 if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1014 state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
1015 }
1016 }
1017
1018 void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
1019 if (is_shenandoah_wb_pre_call(node)) {
1020 shenandoah_eliminate_wb_pre(node, ¯o->igvn());
1021 }
1022 if (ShenandoahCardBarrier && node->Opcode() == Op_CastP2X) {
1023 Node* shift = node->unique_out();
1024 Node* addp = shift->unique_out();
1025 for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
1026 Node* mem = addp->last_out(j);
1027 if (UseCondCardMark && mem->is_Load()) {
1028 assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
1029 // The load is checking if the card has been written so
1030 // replace it with zero to fold the test.
1031 macro->replace_node(mem, macro->intcon(0));
1032 continue;
1033 }
1034 assert(mem->is_Store(), "store required");
1035 macro->replace_node(mem, mem->in(MemNode::Memory));
1036 }
1037 }
1038 }
1039
1040 void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
1041 assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
1042 Node* c = call->as_Call()->proj_out(TypeFunc::Control);
1043 c = c->unique_ctrl_out();
1044 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
1045 c = c->unique_ctrl_out();
1046 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
1047 Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
1048 assert(iff->is_If(), "expect test");
1049 if (!is_shenandoah_marking_if(igvn, iff)) {
1050 c = c->unique_ctrl_out();
1051 assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
1052 iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
1053 assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
1054 }
1055 Node* cmpx = iff->in(1)->in(1);
1056 igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
|