  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
  const Register thread = r15_thread;

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  generate_pre_barrier_fast_path(masm, thread);
  // If marking is not active (*(mark queue active address) == 0), jump to done.
  __ jcc(Assembler::equal, done);
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp, done, runtime);
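  // The slow path loads the previous value into pre_val (when obj is given)
  // and jumps to done once that value is null or has been enqueued in the
  // thread-local SATB buffer; it reaches runtime only when the buffer is
  // full, so the value below is handed to the runtime instead.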

  __ bind(runtime);

  if (EnableValhalla && InlineTypePassFieldsAsArgs) {
    // Barriers might be emitted when converting between (scalarized) calling conventions for inline
    // types. Save all argument registers before calling into the runtime.
    // TODO 8366717: use push_set() (see JDK-8283327 push/pop_call_clobbered_registers & aarch64)
    __ pusha();
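    // Reserve 64 bytes for the eight XMM Java argument registers
    // (j_farg0..j_farg7, 8 bytes per double); pusha above only covers the
    // general-purpose registers.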
    __ subptr(rsp, 64);
    __ movdbl(Address(rsp, 0),  j_farg0);
    __ movdbl(Address(rsp, 8),  j_farg1);
    __ movdbl(Address(rsp, 16), j_farg2);
    __ movdbl(Address(rsp, 24), j_farg3);
    __ movdbl(Address(rsp, 32), j_farg4);
    __ movdbl(Address(rsp, 40), j_farg5);
    __ movdbl(Address(rsp, 48), j_farg6);
    __ movdbl(Address(rsp, 56), j_farg7);
  } else {
    // Determine and save the live input values
    __ push_call_clobbered_registers();
  }

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

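  // In both branches below, pre_val and thread are passed as the two
  // arguments of G1BarrierSetRuntime::write_ref_field_pre_entry.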
  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    if (c_rarg0 != pre_val) {
      __ mov(c_rarg0, pre_val);
    }
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  if (EnableValhalla && InlineTypePassFieldsAsArgs) {
    // Restore registers
    __ movdbl(j_farg0, Address(rsp, 0));
    __ movdbl(j_farg1, Address(rsp, 8));
    __ movdbl(j_farg2, Address(rsp, 16));
    __ movdbl(j_farg3, Address(rsp, 24));
    __ movdbl(j_farg4, Address(rsp, 32));
    __ movdbl(j_farg5, Address(rsp, 40));
    __ movdbl(j_farg6, Address(rsp, 48));
    __ movdbl(j_farg7, Address(rsp, 56));
    __ addptr(rsp, 64);
    __ popa();
  } else {
    __ pop_call_clobbered_registers();
  }

  __ bind(done);
}

static void generate_post_barrier(MacroAssembler* masm,
                                  const Register store_addr,
                                  const Register new_val,
                                  const Register tmp1,
                                  Label& done,
                                  bool new_val_may_be_null) {

  assert_different_registers(store_addr, new_val, tmp1, noreg);

  Register thread = r15_thread;

  // Does store cross heap regions?
  __ movptr(tmp1, store_addr);                       // tmp1 := store address
  __ xorptr(tmp1, new_val);                          // tmp1 := store address ^ new value
  __ shrptr(tmp1, G1HeapRegion::LogOfHRGrainBytes);  // ((store address ^ new value) >> LogOfHRGrainBytes) == 0?
  __ jcc(Assembler::equal, done);
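  // Addresses within one heap region share every bit above
  // LogOfHRGrainBytes, so a zero result means the store cannot create a
  // cross-region reference and no card needs to be dirtied.

  // ... (remainder of generate_post_barrier and the start of the C2
  // pre-barrier stub path not shown here) ...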
  generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
  __ jmp(*stub->continuation());
}

void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
                                                     Register store_addr,
                                                     Register new_val,
                                                     Register tmp,
                                                     bool new_val_may_be_null) {
  Label done;
  generate_post_barrier(masm, store_addr, new_val, tmp, done, new_val_may_be_null);
  __ bind(done);
}

#endif // COMPILER2

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

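  // An uninitialized destination holds no previous value, so the SATB
  // pre-barrier can be skipped; val == noreg denotes a store of null, which
  // can never create a cross-region reference, so the post-barrier is
  // skipped as well.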
  bool needs_pre_barrier = as_normal && !dest_uninitialized;
  bool needs_post_barrier = val != noreg && in_heap;

  // Flatten the object address if needed.
  // We do it regardless of precise because we need the registers.
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm /*masm*/,
                         tmp1 /* obj */,
                         tmp2 /* pre_val */,
                         tmp3 /* tmp */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }