 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_IR.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
...
  }
  return obj;
}

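// The IU (incremental update) barrier makes the newly stored value visible to
// concurrent marking: it materializes the value in a register and feeds it
// through the pre-barrier path as pre_val, with no address operand.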
LIR_Opr ShenandoahBarrierSetC1::iu_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
  if (ShenandoahIUBarrier) {
    obj = ensure_in_register(gen, obj, T_OBJECT);
    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
  }
  return obj;
}

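// Oop stores may need up to three barriers: the SATB pre-barrier records the
// previous value at the store address, the IU barrier records the new value,
// and (in generational mode) a card-mark post-barrier follows the store itself.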
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
    }
    value = iu_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
  }
  BarrierSetC1::store_at_resolved(access, value);

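  // Card-mark the store: array and unknown-offset accesses mark the card of the
  // exact element address (precise), other field stores mark the card of the
  // object's base address (imprecise).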
  if (access.is_oop()) {
    DecoratorSet decorators = access.decorators();
    bool is_array = (decorators & IS_ARRAY) != 0;
    bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    bool precise = is_array || on_anonymous;
    LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
    post_barrier(access, post_addr, value);
  }
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  // We must resolve in register when patching. This is to avoid
  // having a patch area in the load barrier stub, since the call
  // into the runtime to patch will not have the proper oop map.
  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}

void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();
...
    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                          "shenandoah_load_reference_barrier_strong_slow",
                                                                          false, &lrb_strong_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                                 "shenandoah_load_reference_barrier_strong_native_slow",
                                                                                 false, &lrb_strong_native_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                        "shenandoah_load_reference_barrier_weak_slow",
                                                                        false, &lrb_weak_code_gen_cl);

    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                           "shenandoah_load_reference_barrier_phantom_slow",
                                                                           false, &lrb_phantom_code_gen_cl);
  }
}

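// Card-mark post-barrier: only generational Shenandoah maintains a card table,
// and only heap stores are recorded. The card covering the updated location is
// marked dirty so that old-to-young pointers created by the store can be found
// when the remembered set is scanned.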
void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    return;
  }

  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  BarrierSet* bs = BarrierSet::barrier_set();
  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
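  // If the address is still in LIR_Address form, flatten it into a plain
  // pointer register so it can be shifted below.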
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

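  // Compute the card index: shift the address right by the card shift, then
  // address the card table relative to its base (inlined as a constant
  // displacement when the platform allows, otherwise loaded into a register).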
  LIR_Opr tmp = gen->new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  LIR_Address* card_addr;
  if (gen->can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

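  // With UseCondCardMark, check the current card value first and skip the store
  // if the card is already dirty; otherwise dirty the card unconditionally.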
  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    __ move(dirty, card_addr);
  }
}