src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
+ #include "gc/shared/barrierSetRuntime.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
// LR is live. It must be saved around calls.
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
+
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
-   val = val == noreg ? zr : val;
    if (in_heap) {
!     if (UseCompressedOops) {
!       assert(!dst.uses(val), "not enough registers");
!       if (val != zr) {
!         __ encode_heap_oop(val);
        }
-       __ strw(val, dst);
      } else {
!       __ str(val, dst);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
+ bool is_not_null = (decorators & IS_NOT_NULL) != 0;
+
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
!     if (val == noreg) {
!       assert(!is_not_null, "inconsistent access");
!       if (UseCompressedOops) {
!         __ strw(zr, dst);
+       } else {
+         __ str(zr, dst);
        }
      } else {
!       if (UseCompressedOops) {
+         assert(!dst.uses(val), "not enough registers");
+         if (is_not_null) {
+           __ encode_heap_oop_not_null(val);
+         } else {
+           __ encode_heap_oop(val);
+         }
+         __ strw(val, dst);
+       } else {
+         __ str(val, dst);
+       }
      }
    } else {
      assert(in_native, "why else?");
+     assert(val != noreg, "not supported");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
  case T_DOUBLE: __ strd(v0, dst); break;
  default: Unimplemented();
  }
}
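
For context, not part of the patch: callers normally reach BarrierSetAssembler::store_at through the MacroAssembler oop-store helpers rather than directly, and the decorators select which of the branches above gets emitted. A minimal sketch of the two new paths, where the register choices (r0, r1, rscratch1, rscratch2) and the field offset are picked only for illustration and a MacroAssembler* masm is assumed to be in scope:

  // Sketch only: illustrative caller, not code from this change.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();

  // Oop in r0 known to be non-null: takes the encode_heap_oop_not_null() path.
  bs->store_at(masm, IN_HEAP | IS_NOT_NULL, T_OBJECT,
               Address(r1, 16), r0, rscratch1, rscratch2, noreg);

  // Null store: val == noreg selects the new "store zr" path; combining noreg
  // with IS_NOT_NULL would trip the "inconsistent access" assert.
  bs->store_at(masm, IN_HEAP, T_OBJECT,
               Address(r1, 16), noreg, rscratch1, rscratch2, noreg);
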
+ void BarrierSetAssembler::value_copy(MacroAssembler* masm, DecoratorSet decorators,
+                                      Register src, Register dst, Register value_klass) {
+   // value_copy implementation is fairly complex, and there are not any
+   // "short-cuts" to be made from asm. What there is, appears to have the same
+   // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds
+   // of hand-rolled instructions...
+   if (decorators & IS_DEST_UNINITIALIZED) {
+     __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, value_klass);
+   } else {
+     __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, value_klass);
+   }
+ }
+
+ void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet decorators,
+                                           Register src, Register dst, Register inline_layout_info) {
+   // flat_field_copy implementation is fairly complex, and there are not any
+   // "short-cuts" to be made from asm. What there is, appears to have the same
+   // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds
+   // of hand-rolled instructions...
+   if (decorators & IS_DEST_UNINITIALIZED) {
+     __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized2), src, dst, inline_layout_info);
+   } else {
+     __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy2), src, dst, inline_layout_info);
+   }
+ }
+
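Not part of the patch, but for orientation: both helpers above shift the copy to the C++ runtime. call_VM_leaf only marshals the register arguments into the C calling convention and calls the entry point, with no thread-state transition, so the target must be a leaf routine that cannot safepoint. The sketch below shows the general shape of such an entry; the function name, parameter types and the copy helper are assumptions for illustration, not the actual BarrierSetRuntime definitions:

  // Illustrative leaf entry only (hypothetical name, signature and body).
  JRT_LEAF(void, example_flat_copy(void* src, void* dst, InlineKlass* vk))
    // Assumed helper: copy the flat payload so that any oop fields inside it
    // pass through the active barrier set's store barriers.
    vk->copy_payload_with_barriers(src, dst);   // hypothetical helper
  JRT_END
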
void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       Register dst1,