 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#endif // COMPILER2

#define __ masm->
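// The `__` shorthand is the usual HotSpot assembler idiom: it expands to
// masm->, so the code-generation sequences below read like an assembly
// listing rather than a chain of method calls.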

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (!dest_uninitialized) {
    Register thread = r15_thread;

// ...

void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
  const Register thread = r15_thread;

  Label done;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  generate_pre_barrier_fast_path(masm, thread);
  // If marking is not active (*(mark queue active address) == 0), jump to done
  __ jcc(Assembler::equal, done);
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp, done);
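
  // Taken together, the fast- and slow-path helpers above emit code roughly
  // equivalent to this sketch (names in the sketch are descriptive
  // placeholders, not real functions):
  //
  //   if (thread->satb_mark_queue_active()) {      // fast path test
  //     if (obj != noreg) pre_val = *obj;          // load the previous value
  //     if (pre_val != nullptr) {
  //       if (!satb_buffer_enqueue(thread, pre_val)) {
  //         // buffer full: fall through to the runtime call emitted below
  //         runtime_call(write_ref_field_pre_entry, pre_val, thread);
  //       }
  //     }
  //   }
  //   done: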

  if (Arguments::is_valhalla_enabled() && InlineTypePassFieldsAsArgs) {
    // Barriers might be emitted when converting between (scalarized) calling conventions for inline
    // types. Save all argument registers before calling into the runtime.
    // TODO 8366717: use push_set() (see JDK-8283327 push/pop_call_clobbered_registers & aarch64)
    __ pusha();
    __ subptr(rsp, 64);
    __ movdbl(Address(rsp, 0), j_farg0);
    __ movdbl(Address(rsp, 8), j_farg1);
    __ movdbl(Address(rsp, 16), j_farg2);
    __ movdbl(Address(rsp, 24), j_farg3);
    __ movdbl(Address(rsp, 32), j_farg4);
    __ movdbl(Address(rsp, 40), j_farg5);
    __ movdbl(Address(rsp, 48), j_farg6);
    __ movdbl(Address(rsp, 56), j_farg7);
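    // pusha() covers only the general-purpose registers, so the eight Java
    // FP argument registers (j_farg0..j_farg7) are spilled by hand into the
    // 64-byte scratch area reserved above (8 doubles * 8 bytes each).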
  } else {
    // Determine and save the live input values
    __ push_call_clobbered_registers();
  }

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed as true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
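    // thread is moved into c_rarg1 before pre_val is moved into c_rarg0,
    // so pre_val must not already live in c_rarg1 or the first move would
    // clobber it; the assert above guards against that.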
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    if (c_rarg0 != pre_val) {
      __ mov(c_rarg0, pre_val);
    }
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  if (Arguments::is_valhalla_enabled() && InlineTypePassFieldsAsArgs) {
    // Restore registers
    __ movdbl(j_farg0, Address(rsp, 0));
    __ movdbl(j_farg1, Address(rsp, 8));
    __ movdbl(j_farg2, Address(rsp, 16));
    __ movdbl(j_farg3, Address(rsp, 24));
    __ movdbl(j_farg4, Address(rsp, 32));
    __ movdbl(j_farg5, Address(rsp, 40));
    __ movdbl(j_farg6, Address(rsp, 48));
    __ movdbl(j_farg7, Address(rsp, 56));
    __ addptr(rsp, 64);
    __ popa();
  } else {
    __ pop_call_clobbered_registers();
  }

  __ bind(done);
}

static void generate_post_barrier(MacroAssembler* masm,
                                  const Register store_addr,
                                  const Register new_val,
                                  const Register tmp1,
                                  bool new_val_may_be_null) {

  assert_different_registers(store_addr, new_val, tmp1, noreg);

  Register thread = r15_thread;

  Label L_done;
  // Does store cross heap regions?
  __ movptr(tmp1, store_addr);                       // tmp1 := store address
  __ xorptr(tmp1, new_val);                          // tmp1 := store address ^ new value
  __ shrptr(tmp1, G1HeapRegion::LogOfHRGrainBytes);  // ((store address ^ new value) >> LogOfHRGrainBytes) == 0?
  __ jccb(Assembler::equal, L_done);
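
  // In effect: if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) goto L_done;
  // Two addresses in the same heap region differ only in their low
  // LogOfHRGrainBytes bits, so the shifted XOR is zero exactly when store
  // address and new value lie in the same region. Such a store cannot create
  // a cross-region reference and needs no remembered-set work.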

// ...

  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp, *stub->continuation());

  generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
  __ jmp(*stub->continuation());
}

void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
                                                     Register store_addr,
                                                     Register new_val,
                                                     Register tmp,
                                                     bool new_val_may_be_null) {
  generate_post_barrier(masm, store_addr, new_val, tmp, new_val_may_be_null);
}

#endif // COMPILER2

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  bool needs_pre_barrier = as_normal && !dest_uninitialized;
  bool needs_post_barrier = val != noreg && in_heap;
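  // The SATB pre-barrier must observe the previous value, so it applies to
  // normal stores into memory that may already hold an oop and is skipped
  // when the destination is known to be uninitialized. The post-barrier is
  // only needed when a real oop is stored into the heap: val == noreg
  // encodes a null store, which never creates a cross-region reference.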

  // Flatten the object address if needed.
  // We do it regardless of precise card marks because we need the registers.
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm /*masm*/,
                         tmp1 /* obj */,
                         tmp2 /* pre_val */,
                         tmp3 /* tmp */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }