src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp (old version)

  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "ci/ciUtilities.hpp"
 26 #include "gc/shared/c2/cardTableBarrierSetC2.hpp"
 27 #include "gc/shared/cardTable.hpp"
 28 #include "gc/shared/cardTableBarrierSet.hpp"
 29 #include "gc/shared/gc_globals.hpp"
 30 #include "opto/arraycopynode.hpp"
 31 #include "opto/graphKit.hpp"
 32 #include "opto/idealKit.hpp"
 33 #include "opto/macro.hpp"
 34 #include "utilities/macros.hpp"
 35 
 36 #define __ ideal.
 37 
 38 Node* CardTableBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
 39   DecoratorSet decorators = access.decorators();
 40 
 41   Node* adr = access.addr().node();
 42 
 43   bool is_array = (decorators & IS_ARRAY) != 0;
 44   bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 45   bool in_heap = (decorators & IN_HEAP) != 0;

 97     kit->sync_kit(ideal);
 98     post_barrier(kit, access.base(), access.addr().node(), new_val, true);
 99     ideal.sync_kit(kit);
100   } ideal.end_if();
101   kit->final_sync(ideal);
102 
103   return load_store;
104 }
105 
106 Node* CardTableBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
107   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
108   if (!access.is_oop()) {
109     return result;
110   }
111 
112   post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);
113 
114   return result;
115 }
116 
117 Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
 118   // Get base of card map
119   CardTable::CardValue* card_table_base = ci_card_table_address();
120    if (card_table_base != nullptr) {
121      return kit->makecon(TypeRawPtr::make((address)card_table_base));
122    } else {
123      return kit->null();
124    }
125 }
126 
127 // vanilla post barrier
128 // Insert a write-barrier store.  This is to let generational GC work; we have
129 // to flag all oop-stores before the next GC point.
130 void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
131                                          Node* obj,
132                                          Node* adr,
133                                          Node* val,
134                                          bool use_precise) const {
135   // No store check needed if we're storing a null.
136   if (val != nullptr && val->is_Con()) {
137     const Type* t = val->bottom_type();
138     if (t == TypePtr::NULL_PTR || t == Type::TOP) {
139       return;
140     }
141   }
142 
143   if (use_ReduceInitialCardMarks()
144       && obj == kit->just_allocated_object(kit->control())) {

151   }
152 
153   if (!use_precise) {
154     // All card marks for a (non-array) instance are in one place:
155     adr = obj;
156   } else {
157     // Else it's an array (or unknown), and we want more precise card marks.
158   }
159 
160   assert(adr != nullptr, "");
161 
162   IdealKit ideal(kit, true);
163 
164   // Convert the pointer to an int prior to doing math on it
165   Node* cast = __ CastPX(__ ctrl(), adr);
166 
167   // Divide by card size
168   Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift()));
169 
170   // Combine card table base and card offset
171   Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset);
172 
173   // Get the alias_index for raw card-mark memory
174   int adr_type = Compile::AliasIdxRaw;
175 
176   // Dirty card value to store
177   Node* dirty = __ ConI(CardTable::dirty_card_val());
178 
179   if (UseCondCardMark) {
180     // The classic GC reference write barrier is typically implemented
181     // as a store into the global card mark table.  Unfortunately
182     // unconditional stores can result in false sharing and excessive
183     // coherence traffic as well as false transactional aborts.
184     // UseCondCardMark enables MP "polite" conditional card mark
185     // stores.  In theory we could relax the load from ctrl() to
186     // no_ctrl, but that doesn't buy much latitude.
187     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
188     __ if_then(card_val, BoolTest::ne, dirty);
189   }
190 
191   // Smash dirty value into card

src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp (new version)

  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "ci/ciUtilities.hpp"
 26 #include "code/aotCodeCache.hpp"
 27 #include "gc/shared/c2/cardTableBarrierSetC2.hpp"
 28 #include "gc/shared/cardTable.hpp"
 29 #include "gc/shared/cardTableBarrierSet.hpp"
 30 #include "gc/shared/gc_globals.hpp"
 31 #include "opto/arraycopynode.hpp"
 32 #include "opto/graphKit.hpp"
 33 #include "opto/idealKit.hpp"
 34 #include "opto/macro.hpp"
 35 #include "utilities/macros.hpp"
 36 
 37 #define __ ideal.
 38 
 39 Node* CardTableBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
 40   DecoratorSet decorators = access.decorators();
 41 
 42   Node* adr = access.addr().node();
 43 
 44   bool is_array = (decorators & IS_ARRAY) != 0;
 45   bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 46   bool in_heap = (decorators & IN_HEAP) != 0;

 98     kit->sync_kit(ideal);
 99     post_barrier(kit, access.base(), access.addr().node(), new_val, true);
100     ideal.sync_kit(kit);
101   } ideal.end_if();
102   kit->final_sync(ideal);
103 
104   return load_store;
105 }
106 
107 Node* CardTableBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
108   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
109   if (!access.is_oop()) {
110     return result;
111   }
112 
113   post_barrier(access.kit(), access.base(), access.addr().node(), new_val, true);
114 
115   return result;
116 }
117 
118 Node* CardTableBarrierSetC2::byte_map_base_node(IdealKit* kit) const {
119   // Get base of card map
120 #if INCLUDE_CDS
121   if (AOTCodeCache::is_on_for_dump()) {
122     // load the card table address from the AOT Runtime Constants area
123     Node* byte_map_base_adr = kit->makecon(TypeRawPtr::make(AOTRuntimeConstants::card_table_address()));
124     return kit->load_aot_const(byte_map_base_adr, TypeRawPtr::NOTNULL);
125   }
126 #endif
127   CardTable::CardValue* card_table_base = ci_card_table_address();
128   if (card_table_base != nullptr) {
129     return kit->makecon(TypeRawPtr::make((address)card_table_base));
130   } else {
131     return kit->makecon(Type::get_zero_type(T_ADDRESS));
132   }
133 }
134 
135 // vanilla post barrier
136 // Insert a write-barrier store.  This is to let generational GC work; we have
137 // to flag all oop-stores before the next GC point.
138 void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
139                                          Node* obj,
140                                          Node* adr,
141                                          Node* val,
142                                          bool use_precise) const {
143   // No store check needed if we're storing a null.
144   if (val != nullptr && val->is_Con()) {
145     const Type* t = val->bottom_type();
146     if (t == TypePtr::NULL_PTR || t == Type::TOP) {
147       return;
148     }
149   }
150 
151   if (use_ReduceInitialCardMarks()
152       && obj == kit->just_allocated_object(kit->control())) {

159   }
160 
161   if (!use_precise) {
162     // All card marks for a (non-array) instance are in one place:
163     adr = obj;
164   } else {
165     // Else it's an array (or unknown), and we want more precise card marks.
166   }
167 
168   assert(adr != nullptr, "");
169 
170   IdealKit ideal(kit, true);
171 
172   // Convert the pointer to an int prior to doing math on it
173   Node* cast = __ CastPX(__ ctrl(), adr);
174 
175   // Divide by card size
176   Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift()));
177 
178   // Combine card table base and card offset
179   Node* card_adr = __ AddP(__ top(), byte_map_base_node(&ideal), card_offset);
180 
181   // Get the alias_index for raw card-mark memory
182   int adr_type = Compile::AliasIdxRaw;
183 
184   // Dirty card value to store
185   Node* dirty = __ ConI(CardTable::dirty_card_val());
186 
187   if (UseCondCardMark) {
188     // The classic GC reference write barrier is typically implemented
189     // as a store into the global card mark table.  Unfortunately
190     // unconditional stores can result in false sharing and excessive
191     // coherence traffic as well as false transactional aborts.
192     // UseCondCardMark enables MP "polite" conditional card mark
193     // stores.  In theory we could relax the load from ctrl() to
194     // no_ctrl, but that doesn't buy much latitude.
195     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
196     __ if_then(card_val, BoolTest::ne, dirty);
197   }
198 
199   // Smash dirty value into card
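For readers skimming the barrier logic, the IdealKit sequence in post_barrier() above computes: cast the field address to an integer (CastPX), shift it right by the card shift (URShiftX), add the result to the byte map base (AddP), and store the dirty value into that card byte, optionally guarded by a load when UseCondCardMark is set. Below is a minimal stand-alone sketch of the same arithmetic; it is not HotSpot code, and kCardShift, kDirtyCard and card_table_base are illustrative stand-ins for CardTable::card_shift(), CardTable::dirty_card_val() and the node produced by byte_map_base_node().

#include <cstdint>

// Illustrative stand-ins; the real values come from CardTable::card_shift(),
// CardTable::dirty_card_val() and byte_map_base_node().
static const int     kCardShift = 9;     // 512-byte cards, a typical default
static const uint8_t kDirtyCard = 0;
static uint8_t*      card_table_base;    // biased base set up by the runtime

// Roughly what the emitted barrier does after an oop store into *field_addr.
static inline void post_barrier_sketch(void* field_addr, bool cond_card_mark) {
  uintptr_t addr         = reinterpret_cast<uintptr_t>(field_addr);  // CastPX
  uintptr_t card_offset  = addr >> kCardShift;                       // URShiftX
  volatile uint8_t* card = card_table_base + card_offset;            // AddP
  if (cond_card_mark && *card == kDirtyCard) {
    return;  // UseCondCardMark: skip the write if the card is already dirty
  }
  *card = kDirtyCard;  // smash dirty value into card
}

The conditional branch corresponds to the if_then(card_val, BoolTest::ne, dirty) guard emitted under UseCondCardMark; the unconditional variant is just the final store.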
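The functional difference between the two versions of byte_map_base_node() is how the card table base reaches the compiled code. The JIT-only path bakes ci_card_table_address() into the graph as a raw-pointer constant, while the AOT-dump path (guarded by INCLUDE_CDS and AOTCodeCache::is_on_for_dump()) emits a load from the AOT runtime constants area, because the actual base is only known in the process that later loads the code. A rough sketch of that distinction, with g_card_table_slot as a hypothetical stand-in for the runtime-constants entry:

#include <cstdint>

// Hypothetical slot; in the AOT scheme the loading JVM fills the
// runtime-constants area with the process-local card table base.
static uint8_t* g_card_table_slot = nullptr;

// JIT-style: the base is known at compile time and becomes an immediate in
// the generated code, like kit->makecon(TypeRawPtr::make(card_table_base)).
static inline uint8_t* card_base_as_constant(uint8_t* known_base) {
  return known_base;
}

// AOT-style: only the slot's address is baked in; the base itself is read at
// run time, like kit->load_aot_const(...), so the same code image works in
// any process that fills in the slot.
static inline uint8_t* card_base_via_slot() {
  return g_card_table_slot;
}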