/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "utilities/macros.hpp"

#define __ ideal.

Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
  // Get base of card map
  CardTable::CardValue* card_table_base = ci_card_table_address();
  if (card_table_base != nullptr) {
    return kit->makecon(TypeRawPtr::make((address)card_table_base));
  } else {
    return kit->null();
  }
}

// vanilla post barrier
// Insert a write-barrier store. This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
                                         Node* obj,
                                         Node* adr,
                                         Node* val,
                                         bool use_precise) const {
  // No store check needed if we're storing a null.
  if (val != nullptr && val->is_Con()) {
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP) {
      return;
    }
  }

  if (use_ReduceInitialCardMarks()
      && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with CardTableBarrierSet::on_slowpath_allocation_exit.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  } else {
    // Else it's an array (or unknown), and we want more precise card marks.
  }

  assert(adr != nullptr, "");

  IdealKit ideal(kit, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift()));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset);

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;

  // Dirty card value to store
  Node* dirty = __ ConI(CardTable::dirty_card_val());

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table. Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores. In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, dirty);
  }

  // Smash dirty value into card
  __ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
  return ReduceInitialCardMarks;
}

void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  for (DUIterator_Last imin, i = node->last_outs(imin); i >= imin; --i) {
    Node* shift = node->last_out(i);
    for (DUIterator_Last jmin, j = shift->last_outs(jmin); j >= jmin; --j) {
      Node* addp = shift->last_out(j);
      for (DUIterator_Last kmin, k = addp->last_outs(kmin); k >= kmin; --k) {
        Node* mem = addp->last_out(k);
        if (UseCondCardMark && mem->is_Load()) {
          assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
          // The load is checking if the card has been written so
          // replace it with zero to fold the test.
          igvn->replace_node(mem, igvn->intcon(0));
          continue;
        }
        assert(mem->is_Store(), "store required");
        igvn->replace_node(mem, mem->in(MemNode::Memory));
      }
    }
  }
}

bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
}