
src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp


  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/macroAssembler.inline.hpp"
 26 #include "gc/g1/g1BarrierSet.hpp"
 27 #include "gc/g1/g1BarrierSetAssembler.hpp"
 28 #include "gc/g1/g1BarrierSetRuntime.hpp"
 29 #include "gc/g1/g1CardTable.hpp"
 30 #include "gc/g1/g1HeapRegion.hpp"
 31 #include "gc/g1/g1ThreadLocalData.hpp"
 32 #include "gc/shared/collectedHeap.hpp"
 33 #include "interpreter/interp_masm.hpp"
 34 #include "runtime/javaThread.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #ifdef COMPILER1
 37 #include "c1/c1_LIRAssembler.hpp"
 38 #include "c1/c1_MacroAssembler.hpp"
 39 #include "gc/g1/c1/g1BarrierSetC1.hpp"
 40 #endif // COMPILER1
 41 #ifdef COMPILER2
 42 #include "gc/g1/c2/g1BarrierSetC2.hpp"
 43 #endif // COMPILER2
 44 
 45 #define __ masm->

190     assert(pre_val != c_rarg1, "smashed arg");
191     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
192   } else {
193     __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
194   }
195 
196   __ pop_call_clobbered_registers();
197 
198   __ bind(done);
199 
200 }
201 
202 static void generate_post_barrier_fast_path(MacroAssembler* masm,
203                                             const Register store_addr,
204                                             const Register new_val,
205                                             const Register tmp1,
206                                             const Register tmp2,
207                                             Label& done,
208                                             bool new_val_may_be_null) {
209   // Does store cross heap regions?
210   __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
211   __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
212   __ cbz(tmp1, done);
213   // Crosses regions, storing null?
214   if (new_val_may_be_null) {
215     __ cbz(new_val, done);
216   }
217   // Storing region crossing non-null, is card young?
218   __ lsr(tmp1, store_addr, CardTable::card_shift());     // tmp1 := card address relative to card table base
219   __ load_byte_map_base(tmp2);                           // tmp2 := card table base address
220   __ add(tmp1, tmp1, tmp2);                              // tmp1 := card address
221   __ ldrb(tmp2, Address(tmp1));                          // tmp2 := card
222   __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());  // tmp2 := card == young_card_val?
223 }
224 
225 static void generate_post_barrier_slow_path(MacroAssembler* masm,
226                                             const Register thread,
227                                             const Register tmp1,
228                                             const Register tmp2,
229                                             Label& done,
230                                             Label& runtime) {
231   __ membar(Assembler::StoreLoad);  // StoreLoad membar
232   __ ldrb(tmp2, Address(tmp1));     // tmp2 := card
233   __ cbzw(tmp2, done);
234   // Storing a region crossing, non-null oop, card is clean.
235   // Dirty card and log.
236   STATIC_ASSERT(CardTable::dirty_card_val() == 0);
237   __ strb(zr, Address(tmp1));       // *(card address) := dirty_card_val
238   generate_queue_test_and_insertion(masm,

264   generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
265 
266   __ bind(runtime);
267   // save the live input values
268   RegSet saved = RegSet::of(store_addr);
269   __ push(saved, sp);
270   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
271   __ pop(saved, sp);
272 
273   __ bind(done);
274 }
275 
276 #if defined(COMPILER2)
277 
278 static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
279   SaveLiveRegisters save_registers(masm, stub);
280   if (c_rarg0 != arg) {
281     __ mov(c_rarg0, arg);
282   }
283   __ mov(c_rarg1, rthread);
284   __ mov(rscratch1, runtime_path);
285   __ blr(rscratch1);
286 }
287 
288 void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
289                                                     Register obj,
290                                                     Register pre_val,
291                                                     Register thread,
292                                                     Register tmp1,
293                                                     Register tmp2,
294                                                     G1PreBarrierStubC2* stub) {
295   assert(thread == rthread, "must be");
296   assert_different_registers(obj, pre_val, tmp1, tmp2);
297   assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
298 
299   stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);
300 
301   generate_pre_barrier_fast_path(masm, thread, tmp1);
302   // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
303   __ cbnzw(tmp1, *stub->entry());
304 

  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/macroAssembler.inline.hpp"
 26 #if INCLUDE_CDS
 27 #include "code/SCCache.hpp"
 28 #endif
 29 #include "gc/g1/g1BarrierSet.hpp"
 30 #include "gc/g1/g1BarrierSetAssembler.hpp"
 31 #include "gc/g1/g1BarrierSetRuntime.hpp"
 32 #include "gc/g1/g1CardTable.hpp"
 33 #include "gc/g1/g1HeapRegion.hpp"
 34 #include "gc/g1/g1ThreadLocalData.hpp"
 35 #include "gc/shared/collectedHeap.hpp"
 36 #include "interpreter/interp_masm.hpp"
 37 #include "runtime/javaThread.hpp"
 38 #include "runtime/sharedRuntime.hpp"
 39 #ifdef COMPILER1
 40 #include "c1/c1_LIRAssembler.hpp"
 41 #include "c1/c1_MacroAssembler.hpp"
 42 #include "gc/g1/c1/g1BarrierSetC1.hpp"
 43 #endif // COMPILER1
 44 #ifdef COMPILER2
 45 #include "gc/g1/c2/g1BarrierSetC2.hpp"
 46 #endif // COMPILER2
 47 
 48 #define __ masm->

193     assert(pre_val != c_rarg1, "smashed arg");
194     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
195   } else {
196     __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
197   }
198 
199   __ pop_call_clobbered_registers();
200 
201   __ bind(done);
202 
203 }
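
Only the runtime-call tail of the pre barrier appears in this hunk. For orientation, here is a minimal C++ sketch of the SATB pre-barrier logic it completes; the queue fields and the extern declaration are assumed stand-ins for the G1ThreadLocalData SATB mark-queue layout and G1BarrierSetRuntime::write_ref_field_pre_entry, not HotSpot definitions.

    #include <cstddef>
    #include <cstdint>

    extern "C" void write_ref_field_pre_entry(void* pre_val, void* thread); // stand-in

    struct SATBMarkQueue {    // assumed shape of the per-thread SATB queue
      uint8_t active;         // non-zero while concurrent marking is running
      void**  buffer;
      size_t  index;          // byte offset of the next free slot, counts down to 0
    };

    inline void pre_barrier(void* pre_val, SATBMarkQueue& q, void* thread) {
      if (q.active == 0) return;       // fast path: marking not active
      if (pre_val == nullptr) return;  // nothing to record
      if (q.index != 0) {              // room in the buffer: log the previous value
        q.index -= sizeof(void*);
        q.buffer[q.index / sizeof(void*)] = pre_val;
      } else {
        write_ref_field_pre_entry(pre_val, thread);  // the runtime call emitted above
      }
    }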
204 
205 static void generate_post_barrier_fast_path(MacroAssembler* masm,
206                                             const Register store_addr,
207                                             const Register new_val,
208                                             const Register tmp1,
209                                             const Register tmp2,
210                                             Label& done,
211                                             bool new_val_may_be_null) {
212   // Does store cross heap regions?
213 #if INCLUDE_CDS
214   // AOT code needs to load the barrier grain shift from the AOT
215   // runtime constants area in the code cache; otherwise we can compile
216   // it as an immediate operand.
217   if (SCCache::is_on_for_write()) {
218     address grain_shift_address = (address)AOTRuntimeConstants::grain_shift_address();
219     __ eor(tmp1, store_addr, new_val);
220     __ lea(tmp2, ExternalAddress(grain_shift_address));
221     __ ldrb(tmp2, tmp2);
222     __ lsrv(tmp1, tmp1, tmp2);
223     __ cbz(tmp1, done);
224   } else
225 #endif
226   {
227     __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
228     __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
229     __ cbz(tmp1, done);
230   }
231 
232   // Crosses regions, storing null?
233   if (new_val_may_be_null) {
234     __ cbz(new_val, done);
235   }
236   // Storing region crossing non-null, is card young?
237 
238 #if INCLUDE_CDS
239   // AOT code needs to load the barrier card shift from the AOT
240   // runtime constants area in the code cache; otherwise we can compile
241   // it as an immediate operand.
242   if (SCCache::is_on_for_write()) {
243     address card_shift_address = (address)AOTRuntimeConstants::card_shift_address();
244     __ lea(tmp2, ExternalAddress(card_shift_address));
245     __ ldrb(tmp2, tmp2);
246     __ lsrv(tmp1, store_addr, tmp2);                        // tmp1 := card address relative to card table base
247   } else
248 #endif
249   {
250     __ lsr(tmp1, store_addr, CardTable::card_shift());     // tmp1 := card address relative to card table base
251   }
252 
253   __ load_byte_map_base(tmp2);                           // tmp2 := card table base address
254   __ add(tmp1, tmp1, tmp2);                              // tmp1 := card address
255   __ ldrb(tmp2, Address(tmp1));                          // tmp2 := card
256   __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());  // tmp2 := card == young_card_val?
257 }
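
Stripped of the assembler plumbing, the fast path above is a pure address computation. A minimal C++ sketch of that logic follows, assuming placeholder values for the region and card shifts (the values the non-AOT path compiles in as immediates, and the AOT path reads from the runtime constants area):

    #include <cstddef>
    #include <cstdint>

    const unsigned kLogRegionBytes = 22;  // assumed stand-in for G1HeapRegion::LogOfHRGrainBytes
    const unsigned kCardShift      = 9;   // assumed stand-in for CardTable::card_shift()

    // Returns the card to examine, or nullptr when the fast path exits early.
    inline uint8_t* post_barrier_fast_path(uintptr_t store_addr,
                                           uintptr_t new_val,
                                           uint8_t*  byte_map_base,
                                           bool      new_val_may_be_null) {
      // Does the store cross heap regions? (eor + lsr + cbz)
      if (((store_addr ^ new_val) >> kLogRegionBytes) == 0) {
        return nullptr;
      }
      // Crosses regions, but a null store needs no remembered-set update. (cbz)
      if (new_val_may_be_null && new_val == 0) {
        return nullptr;
      }
      // Card address = card table base + (store address >> card shift);
      // the generated code then compares *card with g1_young_card_val().
      return byte_map_base + (store_addr >> kCardShift);
    }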
258 
259 static void generate_post_barrier_slow_path(MacroAssembler* masm,
260                                             const Register thread,
261                                             const Register tmp1,
262                                             const Register tmp2,
263                                             Label& done,
264                                             Label& runtime) {
265   __ membar(Assembler::StoreLoad);  // StoreLoad membar
266   __ ldrb(tmp2, Address(tmp1));     // tmp2 := card
267   __ cbzw(tmp2, done);
268   // Storing a region crossing, non-null oop, card is clean.
269   // Dirty card and log.
270   STATIC_ASSERT(CardTable::dirty_card_val() == 0);
271   __ strb(zr, Address(tmp1));       // *(card address) := dirty_card_val
272   generate_queue_test_and_insertion(masm,

298   generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
299 
300   __ bind(runtime);
301   // save the live input values
302   RegSet saved = RegSet::of(store_addr);
303   __ push(saved, sp);
304   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
305   __ pop(saved, sp);
306 
307   __ bind(done);
308 }
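
The slow path re-checks the card after the StoreLoad fence, dirties it, and logs it in the thread-local dirty card queue, falling back to the runtime call above when the buffer is full. A rough C++ sketch of that behaviour follows; the queue layout and the extern declaration are assumed stand-ins, and the enqueue itself comes from generate_queue_test_and_insertion, which is elided in this hunk:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    extern "C" void write_ref_field_post_entry(uint8_t* card, void* thread); // stand-in

    struct DirtyCardQueue {   // assumed shape of the per-thread queue
      uint8_t** buffer;
      size_t    index;        // byte offset of the next free slot, counts down to 0
    };

    inline void post_barrier_slow_path(uint8_t* card, DirtyCardQueue& q, void* thread) {
      std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad membar
      if (*card == 0) {                                      // already dirty_card_val: done
        return;
      }
      *card = 0;                                             // *(card address) := dirty_card_val
      if (q.index == 0) {                                    // buffer full: call into the runtime
        write_ref_field_post_entry(card, thread);
        return;
      }
      q.index -= sizeof(uint8_t*);
      q.buffer[q.index / sizeof(uint8_t*)] = card;           // log the card address
    }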
309 
310 #if defined(COMPILER2)
311 
312 static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
313   SaveLiveRegisters save_registers(masm, stub);
314   if (c_rarg0 != arg) {
315     __ mov(c_rarg0, arg);
316   }
317   __ mov(c_rarg1, rthread);
318   __ lea(rscratch1, RuntimeAddress(runtime_path));
319   __ blr(rscratch1);
320 }
321 
322 void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
323                                                     Register obj,
324                                                     Register pre_val,
325                                                     Register thread,
326                                                     Register tmp1,
327                                                     Register tmp2,
328                                                     G1PreBarrierStubC2* stub) {
329   assert(thread == rthread, "must be");
330   assert_different_registers(obj, pre_val, tmp1, tmp2);
331   assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
332 
333   stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);
334 
335   generate_pre_barrier_fast_path(masm, thread, tmp1);
336   // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
337   __ cbnzw(tmp1, *stub->entry());
338 