#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/barrierSetAssembler)

// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2ParseAccess::barrier_set_state() const {
  return _kit->barrier_set_state();
}

PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }

Node* C2ParseAccess::control() const {
  return _ctl == nullptr ? _kit->control() : _ctl;
}

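// Decide whether an access must be wrapped in CPU membars. The decorators
// describe what is statically known about the access: an access that is both
// a read and a write is an atomic operation (e.g. a compare-and-swap or swap)
// and always needs membars, while for anonymous (Unsafe) accesses the answer
// depends on whether a unique alias category can be determined for the address.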
bool C2Access::needs_cpu_membar() const {
  bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;

  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap = (_decorators & IN_HEAP) != 0;
  bool in_native = (_decorators & IN_NATIVE) != 0;
  bool is_mixed = !in_heap && !in_native;

  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;

  MemNode::MemOrd mo = access.mem_node_mo();
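  // Loads marked C2_UNKNOWN_CONTROL_LOAD must not float above the check they
  // depend on, so they are created with LoadNode::UnknownControl. All other
  // loads depend only on their dominating test and may be moved up to an
  // equivalent dominating check.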
  LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;

  Node* load;
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    Node* control = control_dependent ? parse_access.control() : nullptr;

    if (immutable) {
      Node* mem = kit->immutable_memory();
      load = LoadNode::make(kit->gvn(), control, mem, adr,
                            adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
                            unaligned, mismatched, unsafe, access.barrier_data());
      load = kit->gvn().transform(load);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                            dep, requires_atomic_access, unaligned, mismatched, unsafe,
                            access.barrier_data());
    }
  } else {
    assert(access.is_opt_access(), "either parse or opt access");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    Node* control = control_dependent ? opt_access.ctl() : nullptr;
    MergeMemNode* mm = opt_access.mem();
    PhaseGVN& gvn = opt_access.gvn();
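    // Select the memory slice for this address out of the MergeMem: the alias
    // index partitions the memory state by address type, so the load is only
    // ordered against stores to the same slice.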
    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
  assert(size->bottom_type()->base() == Type_X,
         "Should be of object size type (int for 32 bits, long for 64 bits)");

  // The native clone we are calling here expects the object size in words.
  // Add the header/payload base offset to the payload size to get the object size.
  Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
  Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
  // HeapAccess<>::clone expects the size in heap words.
  // On 64-bit platforms this shift is a no-op.
  // On 32-bit platforms we need to multiply full_size by HeapWordsPerLong (2).
  Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
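  // Illustrative numbers: with a 16-byte header, base_offset is 16 >> 3 == 2
  // longs; a payload of size == 2 longs then makes full_size == 4. On 64-bit,
  // LogHeapWordsPerLong == 0 and the shift is an identity (4 heap words); on
  // 32-bit, LogHeapWordsPerLong == 1 and the result is 8 heap words.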

  Node* const call = phase->make_leaf_call(ctrl,
                                           mem,
                                           clone_type(),
                                           clone_addr,
                                           clone_name,
                                           TypeRawPtr::BOTTOM,
                                           src, dst, full_size_in_heap_words XTOP);
  phase->transform_later(call);
  phase->replace_node(ac, call);
}

void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* payload_src = phase->basic_plus_adr(src, src_offset);
  Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
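  // basic_plus_adr computes raw src + src_offset / dest + dest_offset; for
  // clones the SrcPos/DestPos inputs carry the byte offset of the payload
  // base, so these are the addresses of the first payload word.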

  const char* copyfunc_name = "arraycopy";
  address copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);
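  // T_LONG selects a raw word-copy stub: the payload is moved as 64-bit
  // chunks with no per-element GC barriers. That is only correct for barrier
  // sets without special clone requirements; collectors that need more work
  // override clone_at_expansion.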

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

  Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
  phase->transform_later(call);

  phase->replace_node(ac, call);
}

#undef XTOP

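// Compute the registers live at each GC barrier stub, so that a stub only
// has to save and restore the registers that are actually live across it.
// This is a standard backwards liveness dataflow over the CFG: every block
// starts with an empty live mask and is pushed on a worklist, which is then
// drained to a fixed point.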
void BarrierSetC2::compute_liveness_at_stubs() const {
  ResourceMark rm;
  Compile* const C = Compile::current();
  Arena* const A = Thread::current()->resource_area();
  PhaseCFG* const cfg = C->cfg();
  PhaseRegAlloc* const regalloc = C->regalloc();
  RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks());
  BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
  BarrierSetC2State* bs_state = barrier_set_state();
  Block_List worklist;

  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    new ((void*)(live + i)) RegMask();
    worklist.push(cfg->get_block(i));
  }
