
src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp

Old version:

  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "c1/c1_LIRGenerator.hpp"
 27 #include "c1/c1_CodeStubs.hpp"
 28 #include "gc/g1/c1/g1BarrierSetC1.hpp"
 29 #include "gc/g1/g1BarrierSet.hpp"
 30 #include "gc/g1/g1BarrierSetAssembler.hpp"
 31 #include "gc/g1/g1HeapRegion.hpp"
 32 #include "gc/g1/g1ThreadLocalData.hpp"
 33 #include "utilities/macros.hpp"
 34 
 35 #ifdef ASSERT
 36 #define __ gen->lir(__FILE__, __LINE__)->
 37 #else
 38 #define __ gen->lir()->
 39 #endif
 40 
 41 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
 42   G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
 43   bs->gen_pre_barrier_stub(ce, this);
 44 }
 45 
 46 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
 47   G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();

136     }
137     new_val = new_val_reg;
138   }
139   assert(new_val->is_register(), "must be a register at this point");
140 
141   if (addr->is_address()) {
142     LIR_Address* address = addr->as_address_ptr();
143     LIR_Opr ptr = gen->new_pointer_register();
144     if (!address->index()->is_valid() && address->disp() == 0) {
145       __ move(address->base(), ptr);
146     } else {
147       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
148       __ leal(addr, ptr);
149     }
150     addr = ptr;
151   }
152   assert(addr->is_register(), "must be a register at this point");
153 
154   LIR_Opr xor_res = gen->new_pointer_register();
155   LIR_Opr xor_shift_res = gen->new_pointer_register();












156   if (two_operand_lir_form) {
157     __ move(addr, xor_res);
158     __ logical_xor(xor_res, new_val, xor_res);
159     __ move(xor_res, xor_shift_res);
160     __ unsigned_shift_right(xor_shift_res,
161                             LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
162                             xor_shift_res,
163                             LIR_Opr::illegalOpr());
164   } else {
165     __ logical_xor(addr, new_val, xor_res);
166     __ unsigned_shift_right(xor_res,
167                             LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
168                             xor_shift_res,
169                             LIR_Opr::illegalOpr());
170   }
171 
172   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
173 
174   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
175   __ branch(lir_cond_notEqual, slow);
176   __ branch_destination(slow->continuation());
177 }
178 
179 void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
180   DecoratorSet decorators = access.decorators();
181   bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
182   bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
183   bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
184   LIRGenerator *gen = access.gen();
185 
186   BarrierSetC1::load_at_resolved(access, result);
187 
188   if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
189     // Register the value in the referent field with the pre-barrier
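
The listing above is the old side of the comparison. Its post barrier only takes the slow path for stores that create a cross-region reference: the field address is XORed with the new value and shifted right by G1HeapRegion::LogOfHRGrainBytes, and a non-zero result branches to G1PostBarrierStub. A minimal C++ sketch of that filter, for illustration only (the function and parameter names here are invented; only the XOR/shift test comes from the listing):

    #include <cstdint>

    // True when a store needs the G1 post-barrier slow path, i.e. the
    // written field and the stored oop live in different heap regions.
    static bool crosses_region(uintptr_t field_addr, uintptr_t new_val,
                               int log_of_hr_grain_bytes) {
      // Addresses in the same region agree on every bit above the grain
      // shift, so XOR-then-shift is zero exactly for same-region stores.
      return ((field_addr ^ new_val) >> log_of_hr_grain_bytes) != 0;
    }

New version: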

  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "c1/c1_LIRGenerator.hpp"
 27 #include "c1/c1_CodeStubs.hpp"
 28 #if INCLUDE_CDS
 29 #include "code/SCCache.hpp"
 30 #endif
 31 #include "gc/g1/c1/g1BarrierSetC1.hpp"
 32 #include "gc/g1/g1BarrierSet.hpp"
 33 #include "gc/g1/g1BarrierSetAssembler.hpp"
 34 #include "gc/g1/g1HeapRegion.hpp"
 35 #include "gc/g1/g1ThreadLocalData.hpp"
 36 #include "utilities/macros.hpp"
 37 
 38 #ifdef ASSERT
 39 #define __ gen->lir(__FILE__, __LINE__)->
 40 #else
 41 #define __ gen->lir()->
 42 #endif
 43 
 44 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
 45   G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
 46   bs->gen_pre_barrier_stub(ce, this);
 47 }
 48 
 49 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
 50   G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();

139     }
140     new_val = new_val_reg;
141   }
142   assert(new_val->is_register(), "must be a register at this point");
143 
144   if (addr->is_address()) {
145     LIR_Address* address = addr->as_address_ptr();
146     LIR_Opr ptr = gen->new_pointer_register();
147     if (!address->index()->is_valid() && address->disp() == 0) {
148       __ move(address->base(), ptr);
149     } else {
150       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
151       __ leal(addr, ptr);
152     }
153     addr = ptr;
154   }
155   assert(addr->is_register(), "must be a register at this point");
156 
157   LIR_Opr xor_res = gen->new_pointer_register();
158   LIR_Opr xor_shift_res = gen->new_pointer_register();
159 #if INCLUDE_CDS
160   // We need to load the grain shift from the AOT Runtime
161   // Constants Area.
162   LIR_Opr grain_shift_addr = LIR_OprFact::intptrConst(AOTRuntimeConstants::grain_shift_address());
163   LIR_Opr grain_shift_reg = gen->new_pointer_register();
164   LIR_Address* grain_shift_indirect = new LIR_Address(grain_shift_reg, 0, T_INT);
165 #ifdef X86
166   LIR_Opr grain_shift = gen->shiftCountOpr();
167 #else // X86
168   LIR_Opr grain_shift = gen->new_register(T_INT);
169 #endif // X86
170 #endif
171   if (two_operand_lir_form) {
172     __ move(addr, xor_res);
173     __ logical_xor(xor_res, new_val, xor_res);
174 #if INCLUDE_CDS
175     if (SCCache::is_on_for_write()) {
176       __ move(grain_shift_addr, grain_shift_reg);
177       __ move(xor_res, xor_shift_res);
178       __ move(grain_shift_indirect, grain_shift);
179       __ unsigned_shift_right(xor_shift_res,
180                               grain_shift,
181                               xor_shift_res,
182                               LIR_Opr::illegalOpr());
183     } else
184 #endif
185     {
186       __ move(xor_res, xor_shift_res);
187       __ unsigned_shift_right(xor_shift_res,
188                               LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
189                               xor_shift_res,
190                               LIR_Opr::illegalOpr());
191     }
192   } else {
193     __ logical_xor(addr, new_val, xor_res);
194 #if INCLUDE_CDS
195     if (SCCache::is_on_for_write()) {
196       __ move(grain_shift_addr, grain_shift_reg);
197       __ move(grain_shift_indirect, grain_shift);
198       __ unsigned_shift_right(xor_res,
199                               grain_shift,
200                               xor_shift_res,
201                               LIR_Opr::illegalOpr());
202     } else
203 #endif
204     {
205       __ unsigned_shift_right(xor_res,
206                               LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
207                               xor_shift_res,
208                               LIR_Opr::illegalOpr());
209     }
210   }
211 
212   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
213 
214   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
215   __ branch(lir_cond_notEqual, slow);
216   __ branch_destination(slow->continuation());
217 }
218 
219 void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
220   DecoratorSet decorators = access.decorators();
221   bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
222   bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
223   bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
224   LIRGenerator *gen = access.gen();
225 
226   BarrierSetC1::load_at_resolved(access, result);
227 
228   if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
229     // Register the value in the referent field with the pre-barrier
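
The change, guarded by INCLUDE_CDS, makes the shift count dynamic when C1 is emitting code into the startup code cache. If SCCache::is_on_for_write(), the grain shift is loaded at run time from the slot returned by AOTRuntimeConstants::grain_shift_address() instead of being baked in as an immediate, presumably so that cached code still works when a later run picks a different G1 region size; otherwise the original constant-shift sequence is emitted. A rough C++ sketch of the run-time effect of the two emitted sequences (illustrative only; shifted_xor, emitting_aot_code and grain_shift_slot are invented stand-ins):

    #include <cstdint>

    // Stand-in for the word published in the AOT runtime constants area;
    // the real slot is located via AOTRuntimeConstants::grain_shift_address().
    static int grain_shift_slot = 21;   // e.g. log2 of a 2 MB region

    static uintptr_t shifted_xor(uintptr_t field_addr, uintptr_t new_val,
                                 bool emitting_aot_code) {
      uintptr_t x = field_addr ^ new_val;
      if (emitting_aot_code) {
        // SCCache path: the shift count is read through an indirection, so
        // code kept in the cache adapts to the region size of the current run.
        return x >> grain_shift_slot;
      }
      // Regular JIT path: the shift count is a compile-time immediate
      // (G1HeapRegion::LogOfHRGrainBytes, 21 for 2 MB regions).
      return x >> 21;
    }

The #ifdef X86 branch that picks gen->shiftCountOpr() exists because x86 variable-count shift instructions take their count in CL, so the loaded shift has to land in that fixed register; other platforms can use an ordinary T_INT register.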