src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp

156                                                  bool expand_call) {
157   // If expand_call is true then we expand the call_VM_leaf macro
158   // directly to skip the _last_sp check generated by
159   // InterpreterMacroAssembler::call_VM_leaf_base.
160 
161   assert(thread == rthread, "must be");
162 
163   Label done;
164   Label runtime;
165 
166   assert_different_registers(obj, pre_val, tmp1, tmp2);
167   assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
168 
169   generate_pre_barrier_fast_path(masm, thread, tmp1);
170   // If marking is not active (*(mark queue active address) == 0), jump to done
171   __ cbzw(tmp1, done);
172   generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);
173 
174   __ bind(runtime);
175 
176   __ push_call_clobbered_registers();
177 
178   // Calling the runtime using the regular call_VM_leaf mechanism generates
179   // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
180   // that checks that *(rfp + frame::interpreter_frame_last_sp) == nullptr.
181   //
182   // If we are generating the pre-barrier without a frame (e.g. in the
183   // intrinsified Reference.get() routine) then rfp might be pointing to
184   // the caller frame and so this check will most likely fail at runtime.
185   //
186   // Expanding the call directly bypasses the generation of the check.
187   // So when we do not have a full interpreter frame on the stack,
188   // expand_call should be passed true.
189 
190   if (expand_call) {
191     assert(pre_val != c_rarg1, "smashed arg");
192     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
193   } else {
194     __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
195   }
196 
197   __ pop_call_clobbered_registers();
198 
199   __ bind(done);
200 
201 }
202 
203 static void generate_post_barrier_fast_path(MacroAssembler* masm,
204                                             const Register store_addr,
205                                             const Register new_val,
206                                             const Register tmp1,
207                                             const Register tmp2,
208                                             Label& done,
209                                             bool new_val_may_be_null) {
210   // Does store cross heap regions?
211   __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
212   __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
213   __ cbz(tmp1, done);
214   // Crosses regions, storing null?
215   if (new_val_may_be_null) {
216     __ cbz(new_val, done);
217   }
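
The fast path works because G1 heap regions are power-of-two sized and aligned: two pointers lie in the same region exactly when they agree in every bit above the grain shift, so the shifted XOR is zero. The same test in plain C++ (an illustrative sketch; log_region_bytes stands in for G1HeapRegion::LogOfHRGrainBytes):

    #include <cstdint>

    // True when store_addr and new_val lie in different heap regions,
    // i.e. the store may need to be recorded in the card table.
    static bool crosses_regions(uintptr_t store_addr, uintptr_t new_val,
                                unsigned log_region_bytes) {
      // Same region => identical bits above the shift => XOR shifts to zero.
      return ((store_addr ^ new_val) >> log_region_bytes) != 0;
    }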

248                                                   Register store_addr,
249                                                   Register new_val,
250                                                   Register thread,
251                                                   Register tmp1,
252                                                   Register tmp2) {
253   assert(thread == rthread, "must be");
254   assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
255                              rscratch1);
256   assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
257          && tmp2 != noreg, "expecting a register");
258 
259   Label done;
260   Label runtime;
261 
262   generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
263   // If card is young, jump to done
264   __ br(Assembler::EQ, done);
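    // Note: the EQ consumed by this branch is set at the tail of
    // generate_post_barrier_fast_path, which this hunk elides: it loads the
    // card byte for the store address and compares it against the young-card
    // value. Roughly (a sketch of the usual aarch64 shape, not verbatim):
    //
    //   __ lsr(tmp1, store_addr, CardTable::card_shift());    // card index
    //   __ load_byte_map_base(tmp2);                          // card table base
    //   __ add(tmp1, tmp1, tmp2);                             // card address
    //   __ ldrb(tmp2, Address(tmp1));                         // card value
    //   __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val()); // EQ <=> young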
265   generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
266 
267   __ bind(runtime);
268   // save the live input values
269   RegSet saved = RegSet::of(store_addr);
270   __ push(saved, sp);
271   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
272   __ pop(saved, sp);
273 
274   __ bind(done);
275 }
276 
277 #if defined(COMPILER2)
278 
279 static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
280   SaveLiveRegisters save_registers(masm, stub);
281   if (c_rarg0 != arg) {
282     __ mov(c_rarg0, arg);
283   }
284   __ mov(c_rarg1, rthread);
285   __ mov(rscratch1, runtime_path);
286   __ blr(rscratch1);
287 }
288 
289 void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
290                                                     Register obj,
291                                                     Register pre_val,

375   ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
376   if (on_oop && on_reference) {
377     // LR is live.  It must be saved around calls.
378     __ enter(/*strip_ret_addr*/true); // barrier may call runtime
379     // Generate the G1 pre-barrier code to log the value of
380     // the referent field in an SATB buffer.
381     g1_write_barrier_pre(masm /* masm */,
382                          noreg /* obj */,
383                          dst /* pre_val */,
384                          rthread /* thread */,
385                          tmp1 /* tmp1 */,
386                          tmp2 /* tmp2 */,
387                          true /* tosca_live */,
388                          true /* expand_call */);
389     __ leave();
390   }
391 }
392 
393 void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
394                                          Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
395   // flatten object address if needed
396   if (dst.index() == noreg && dst.offset() == 0) {
397     if (dst.base() != tmp3) {
398       __ mov(tmp3, dst.base());
399     }
400   } else {
401     __ lea(tmp3, dst);
402   }
403 
404   g1_write_barrier_pre(masm,
405                        tmp3 /* obj */,
406                        tmp2 /* pre_val */,
407                        rthread /* thread */,
408                        tmp1  /* tmp1 */,
409                        rscratch2  /* tmp2 */,
410                        val != noreg /* tosca_live */,
411                        false /* expand_call */);
412 
413   if (val == noreg) {
414     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
415   } else {
416     // G1 barrier needs uncompressed oop for region cross check.
417     Register new_val = val;
418     if (UseCompressedOops) {
419       new_val = rscratch2;
420       __ mov(new_val, val);
421     }
422     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
423     g1_write_barrier_post(masm,
424                           tmp3 /* store_adr */,
425                           new_val /* new_val */,
426                           rthread /* thread */,
427                           tmp1 /* tmp1 */,
428                           tmp2 /* tmp2 */);
429   }
430 
431 }
432 
433 #ifdef COMPILER1
434 
435 #undef __
436 #define __ ce->masm()->
437 
438 void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
439   G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
440   // At this point we know that marking is in progress.
441   // If do_load() is true then we have to emit the
442   // load of the previous value; otherwise it has already
443   // been loaded into _pre_val.
444 
445   __ bind(*stub->entry());
446 
447   assert(stub->pre_val()->is_register(), "Precondition.");
448 

156                                                  bool expand_call) {
157   // If expand_call is true then we expand the call_VM_leaf macro
158   // directly to skip the _last_sp check generated by
159   // InterpreterMacroAssembler::call_VM_leaf_base.
160 
161   assert(thread == rthread, "must be");
162 
163   Label done;
164   Label runtime;
165 
166   assert_different_registers(obj, pre_val, tmp1, tmp2);
167   assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
168 
169   generate_pre_barrier_fast_path(masm, thread, tmp1);
170   // If marking is not active (*(mark queue active address) == 0), jump to done
171   __ cbzw(tmp1, done);
172   generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);
173 
174   __ bind(runtime);
175 
176   // save the live input values
177   RegSet saved = RegSet::of(pre_val);
178   FloatRegSet fsaved;
179 
180   // Barriers might be emitted when converting between (scalarized) calling
181   // conventions for inline types. Save all argument registers before calling
182   // into the runtime.
183   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
184     if (tosca_live) saved += RegSet::of(r0);
185     if (obj != noreg) saved += RegSet::of(obj);
186     saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
187     saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);
188 
189     fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
190     fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
191 
192     __ push(saved, sp);
193     __ push_fp(fsaved, sp);
194   } else {
195     __ push_call_clobbered_registers();
196   }
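
The extra saves are needed because scalarized calling conventions can spread an inline type's fields across the Java argument registers. A hypothetical illustration of what may be live at this point (Valhalla semantics, not code from this file):

    // Java (hypothetical): value class Point { int x; int y; }
    // Under InlineTypePassFieldsAsArgs, a call m(Point p) may pass p.x in
    // j_rarg0 and p.y in j_rarg1 instead of a single Point reference, so a
    // barrier emitted while converting between conventions must preserve
    // every j_rarg*/j_farg* register, not just pre_val.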
197 
198   // Calling the runtime using the regular call_VM_leaf mechanism generates
199   // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
200   // that checks that *(rfp + frame::interpreter_frame_last_sp) == nullptr.
201   //
202   // If we are generating the pre-barrier without a frame (e.g. in the
203   // intrinsified Reference.get() routine) then rfp might be pointing to
204   // the caller frame and so this check will most likely fail at runtime.
205   //
206   // Expanding the call directly bypasses the generation of the check.
207   // So when we do not have a full interpreter frame on the stack,
208   // expand_call should be passed true.
209 
210   if (expand_call) {
211     assert(pre_val != c_rarg1, "smashed arg");
212     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
213   } else {
214     __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
215   }
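
For context, the check that super_call_VM_leaf bypasses is a debug-only guard emitted by InterpreterMacroAssembler::call_VM_leaf_base, along these lines (a sketch assuming the usual aarch64 shape, with the offset name taken from the comment above):

    #ifdef ASSERT
      Label L;
      __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp * wordSize));
      __ cbz(rscratch1, L);  // nullptr means a full interpreter frame is set up
      __ stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != nullptr");
      __ bind(L);
    #endif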
216 
217   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
218     __ pop_fp(fsaved, sp);
219     __ pop(saved, sp);
220   } else {
221     __ pop_call_clobbered_registers();
222   }
223 
224   __ bind(done);
225 
226 }
227 
228 static void generate_post_barrier_fast_path(MacroAssembler* masm,
229                                             const Register store_addr,
230                                             const Register new_val,
231                                             const Register tmp1,
232                                             const Register tmp2,
233                                             Label& done,
234                                             bool new_val_may_be_null) {
235   // Does store cross heap regions?
236   __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
237   __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
238   __ cbz(tmp1, done);
239   // Crosses regions, storing null?
240   if (new_val_may_be_null) {
241     __ cbz(new_val, done);
242   }

273                                                   Register store_addr,
274                                                   Register new_val,
275                                                   Register thread,
276                                                   Register tmp1,
277                                                   Register tmp2) {
278   assert(thread == rthread, "must be");
279   assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
280                              rscratch1);
281   assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
282          && tmp2 != noreg, "expecting a register");
283 
284   Label done;
285   Label runtime;
286 
287   generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
288   // If card is young, jump to done
289   __ br(Assembler::EQ, done);
290   generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
291 
292   __ bind(runtime);
293 
294   // save the live input values
295   RegSet saved = RegSet::of(store_addr);
296   FloatRegSet fsaved;
297 
298   // Barriers might be emitted when converting between (scalarized) calling
299   // conventions for inline types. Save all argument registers before calling
300   // into the runtime.
301   if (EnableValhalla && InlineTypePassFieldsAsArgs) {
302     saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
303     saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);
304 
305     fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
306     fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
307   }
308 
309   __ push(saved, sp);
310   __ push_fp(fsaved, sp);
311   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
312   __ pop_fp(fsaved, sp);
313   __ pop(saved, sp);
314 
315   __ bind(done);
316 }
317 
318 #if defined(COMPILER2)
319 
320 static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
321   SaveLiveRegisters save_registers(masm, stub);
322   if (c_rarg0 != arg) {
323     __ mov(c_rarg0, arg);
324   }
325   __ mov(c_rarg1, rthread);
326   __ mov(rscratch1, runtime_path);
327   __ blr(rscratch1);
328 }
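
Two details are worth noting here. SaveLiveRegisters is an RAII helper: the registers recorded as live in the stub are saved on construction and restored automatically when save_registers goes out of scope at the closing brace. And the mov/blr pair is used instead of a plain bl because a direct branch-and-link only reaches +/-128 MB, which the runtime entry may exceed. A plausible call site (an assumption, since g1_write_barrier_pre_c2 is truncated in this hunk):

    generate_c2_barrier_runtime_call(masm, stub, pre_val,
        CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));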
329 
330 void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
331                                                     Register obj,
332                                                     Register pre_val,

416   ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
417   if (on_oop && on_reference) {
418     // LR is live.  It must be saved around calls.
419     __ enter(/*strip_ret_addr*/true); // barrier may call runtime
420     // Generate the G1 pre-barrier code to log the value of
421     // the referent field in an SATB buffer.
422     g1_write_barrier_pre(masm /* masm */,
423                          noreg /* obj */,
424                          dst /* pre_val */,
425                          rthread /* thread */,
426                          tmp1 /* tmp1 */,
427                          tmp2 /* tmp2 */,
428                          true /* tosca_live */,
429                          true /* expand_call */);
430     __ leave();
431   }
432 }
433 
434 void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
435                                          Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
436 
437   bool in_heap = (decorators & IN_HEAP) != 0;
438   bool as_normal = (decorators & AS_NORMAL) != 0;
439   bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
440 
441   bool needs_pre_barrier = as_normal && !dest_uninitialized;
442   bool needs_post_barrier = (val != noreg && in_heap);
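
Spelled out, the two flags yield this decision matrix (derived directly from the expressions above):

    // decorators / operands                      pre-barrier  post-barrier
    // AS_NORMAL store into the heap              yes          yes
    // AS_NORMAL store of null (val == noreg)     yes          no
    // IS_DEST_UNINITIALIZED (initializing store) no           yes
    // store outside the heap (!IN_HEAP)          yes          no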
443 
444   assert_different_registers(val, tmp1, tmp2, tmp3);
445 
446   // flatten object address if needed
447   if (dst.index() == noreg && dst.offset() == 0) {
448     if (dst.base() != tmp3) {
449       __ mov(tmp3, dst.base());
450     }
451   } else {
452     __ lea(tmp3, dst);
453   }
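
Flattening just materializes the full destination address in one register so that the pre-barrier, the store, and the post-barrier all refer to the same location. For a hypothetical field store at offset 24 from a base held in r2:

    __ lea(tmp3, Address(r2, 24));  // tmp3 := r2 + 24
    // every following access then uses Address(tmp3, 0)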
454 
455   if (needs_pre_barrier) {
456     g1_write_barrier_pre(masm,
457                          tmp3 /* obj */,
458                          tmp2 /* pre_val */,
459                          rthread /* thread */,
460                          tmp1  /* tmp1 */,
461                          rscratch2  /* tmp2 */,
462                          val != noreg /* tosca_live */,
463                          false /* expand_call */);
464   }
465 
466   if (val == noreg) {
467     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
468   } else {
469     // G1 barrier needs uncompressed oop for region cross check.
470     Register new_val = val;
471     if (needs_post_barrier) {
472       if (UseCompressedOops) {
473         new_val = rscratch2;
474         __ mov(new_val, val);
475       }
476     }
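
The copy to rscratch2 matters because, with UseCompressedOops, the store path compresses val in place, while the post barrier's region-cross check needs the full 64-bit address. Schematically (a sketch of the compressed-oops branch of BarrierSetAssembler::store_at, not verbatim):

    // __ encode_heap_oop(val);  // val now holds the narrow oop
    // __ strw(val, dst);        // 32-bit store of the compressed value
    // new_val (rscratch2) still holds the uncompressed oop for the barrier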
477 
478     BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
479     if (needs_post_barrier) {
480       g1_write_barrier_post(masm,
481                             tmp3 /* store_adr */,
482                             new_val /* new_val */,
483                             rthread /* thread */,
484                             tmp1 /* tmp1 */,
485                             tmp2 /* tmp2 */);
486     }
487   }
488 
489 }
490 
491 #ifdef COMPILER1
492 
493 #undef __
494 #define __ ce->masm()->
495 
496 void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
497   G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
498   // At this point we know that marking is in progress.
499   // If do_load() is true then we have to emit the
500   // load of the previous value; otherwise it has already
501   // been loaded into _pre_val.
502 
503   __ bind(*stub->entry());
504 
505   assert(stub->pre_val()->is_register(), "Precondition.");
506 