
src/cpu/aarch64/vm/stubGenerator_aarch64.cpp

*** 38,51 **** --- 38,58 ----
  #include "runtime/handles.inline.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/stubCodeGenerator.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.inline.hpp"
+ #include "utilities/macros.hpp"
  #include "utilities/top.hpp"
+
+ #include "stubRoutines_aarch64.hpp"
+
  #ifdef COMPILER2
  #include "opto/runtime.hpp"
  #endif
+ #if INCLUDE_ALL_GCS
+ #include "shenandoahBarrierSetAssembler_aarch64.hpp"
+ #endif

  // Declaration and definition of StubGenerator (no .hpp file).
  // For a more detailed description of the stub routine structure
  // see the comment in stubRoutines.hpp
*** 595,610 ****
  //  count - element count
  //  tmp   - scratch register
  //
  //  Destroy no registers except rscratch1 and rscratch2
  //
! void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
!     // With G1, don't generate the call if we statically know that the target in uninitialized
      if (!dest_uninitialized) {
        __ push_call_clobbered_registers();
        if (count == c_rarg0) {
          if (addr == c_rarg1) {
            // exactly backwards!!
--- 602,617 ----
  //  count - element count
  //  tmp   - scratch register
  //
  //  Destroy no registers except rscratch1 and rscratch2
  //
! void gen_write_ref_array_pre_barrier(Register src, Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
!     // Don't generate the call if we statically know that the target is uninitialized
      if (!dest_uninitialized) {
        __ push_call_clobbered_registers();
        if (count == c_rarg0) {
          if (addr == c_rarg1) {
            // exactly backwards!!
*** 624,633 **** --- 631,645 ----
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
+ #if INCLUDE_ALL_GCS
+     case BarrierSet::ShenandoahBarrierSet:
+       ShenandoahBarrierSetAssembler::bsasm()->arraycopy_prologue(_masm, dest_uninitialized, src, addr, count);
+       break;
+ #endif
      default:
        ShouldNotReachHere();

    }
  }
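The hunk above extends the pre-barrier helper's dispatch on the collector's BarrierSet kind: G1 emits a SATB pre-barrier call, the plain card-table collectors need no pre-barrier, and Shenandoah now hands the entire array-copy prologue to its barrier-set assembler. A minimal standalone sketch of that dispatch shape follows, using hypothetical names (BarrierKind, emit_g1_pre_barrier, emit_shenandoah_prologue) rather than the real HotSpot types:

  // Illustrative sketch only: mirrors the switch-on-barrier-kind dispatch of
  // gen_write_ref_array_pre_barrier with hypothetical stand-ins for HotSpot types.
  #include <cstdio>
  #include <cstdlib>

  enum class BarrierKind { G1SATBCTLogging, CardTableModRef, ModRef, Shenandoah };

  // Hypothetical emitters standing in for the real barrier code generators.
  static void emit_g1_pre_barrier(bool dest_uninitialized) {
    // G1 only needs the SATB pre-barrier when the destination may hold live oops.
    if (!dest_uninitialized) {
      std::puts("emit call to G1 SATB pre-barrier stub");
    }
  }

  static void emit_shenandoah_prologue(bool dest_uninitialized) {
    (void)dest_uninitialized;
    std::puts("emit Shenandoah arraycopy prologue");
  }

  static void gen_pre_barrier(BarrierKind kind, bool dest_uninitialized) {
    switch (kind) {
    case BarrierKind::G1SATBCTLogging:
      emit_g1_pre_barrier(dest_uninitialized);
      break;
    case BarrierKind::CardTableModRef:
    case BarrierKind::ModRef:
      break;                      // card-table collectors have no pre-barrier
    case BarrierKind::Shenandoah:
      emit_shenandoah_prologue(dest_uninitialized);
      break;
    default:
      std::abort();               // ShouldNotReachHere()
    }
  }

  int main() {
    gen_pre_barrier(BarrierKind::Shenandoah, /*dest_uninitialized=*/false);
    return 0;
  }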
*** 692,701 **** --- 704,717 ----
          __ strb(zr, Address(start, count));
          __ subs(count, count, 1);
          __ br(Assembler::GE, L_loop);
        }
        break;
+ #if INCLUDE_ALL_GCS
+     case BarrierSet::ShenandoahBarrierSet:
+       break;
+ #endif
      default:
        ShouldNotReachHere();

    }
    __ bind(L_done);
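For the post-barrier, Shenandoah needs nothing at all, so its new case simply breaks out to L_done, while the card-table collectors dirty every card spanned by the destination range (the strb(zr, ...) loop above stores the dirty value 0 into each card byte). A minimal sketch of what that emitted loop does at runtime, with a hypothetical card table (kCardShift, card_table, dirty_cards are illustrative names):

  // Illustrative sketch only: models the effect of the card-dirtying loop emitted
  // by gen_write_ref_array_post_barrier for card-table barrier sets.
  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  constexpr std::size_t kCardShift = 9;   // 512-byte cards, as in HotSpot's card table
  static uint8_t card_table[1024];        // hypothetical: one byte per 512-byte region

  // Mark every card covered by [start, start + count_bytes) as dirty (value 0),
  // analogous to the strb-zr loop in the stub.
  static void dirty_cards(uintptr_t start, std::size_t count_bytes) {
    uintptr_t first = start >> kCardShift;
    uintptr_t last  = (start + count_bytes - 1) >> kCardShift;
    for (uintptr_t card = first; card <= last; ++card) {
      card_table[card % sizeof card_table] = 0;   // 0 == dirty card
    }
  }

  int main() {
    std::memset(card_table, 0xff, sizeof card_table);  // 0xff == clean card
    dirty_cards(/*start=*/0x1200, /*count_bytes=*/4096);
    std::printf("card 9 value: %u (0 means dirty)\n", (unsigned)card_table[9]);
    return 0;
  }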
*** 1409,1419 ****
    }

    if (is_oop) {
      __ push(RegSet::of(d, count), sp);
      // no registers are destroyed by this call
!     gen_write_ref_array_pre_barrier(d, count, dest_uninitialized);
    }
    copy_memory(aligned, s, d, count, rscratch1, size);
    if (is_oop) {
      __ pop(RegSet::of(d, count), sp);
      if (VerifyOops)
--- 1425,1435 ----
    }

    if (is_oop) {
      __ push(RegSet::of(d, count), sp);
      // no registers are destroyed by this call
!     gen_write_ref_array_pre_barrier(s, d, count, dest_uninitialized);
    }
    copy_memory(aligned, s, d, count, rscratch1, size);
    if (is_oop) {
      __ pop(RegSet::of(d, count), sp);
      if (VerifyOops)
*** 1465,1475 ****
    __ br(Assembler::HS, nooverlap_target);

    if (is_oop) {
      __ push(RegSet::of(d, count), sp);
      // no registers are destroyed by this call
!     gen_write_ref_array_pre_barrier(d, count, dest_uninitialized);
    }
    copy_memory(aligned, s, d, count, rscratch1, -size);
    if (is_oop) {
      __ pop(RegSet::of(d, count), sp);
      if (VerifyOops)
--- 1481,1491 ----
    __ br(Assembler::HS, nooverlap_target);

    if (is_oop) {
      __ push(RegSet::of(d, count), sp);
      // no registers are destroyed by this call
!     gen_write_ref_array_pre_barrier(s, d, count, dest_uninitialized);
    }
    copy_memory(aligned, s, d, count, rscratch1, -size);
    if (is_oop) {
      __ pop(RegSet::of(d, count), sp);
      if (VerifyOops)
*** 1599,1609 ****
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_copy(bool aligned, address *entry,
!                                    const char *name, bool dest_uninitialized = false) {
    const bool not_oop = false;
    return generate_disjoint_copy(sizeof (jint), aligned, not_oop, entry, name);
  }

  // Arguments:
--- 1615,1625 ----
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_copy(bool aligned, address *entry,
!                                    const char *name) {
    const bool not_oop = false;
    return generate_disjoint_copy(sizeof (jint), aligned, not_oop, entry, name);
  }

  // Arguments:
*** 1807,1817 ****
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
  #endif //ASSERT

!   gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // save the original count
    __ mov(count_save, count);

    // Copy from low to high addresses
--- 1823,1833 ----
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
  #endif //ASSERT

!   gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized);

    // save the original count
    __ mov(count_save, count);

    // Copy from low to high addresses