#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/barrierSetAssembler)

// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2ParseAccess::barrier_set_state() const {
  return _kit->barrier_set_state();
}

PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }

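// Use the explicitly supplied control input when one was set; otherwise
// fall back to the GraphKit's current control.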
Node* C2ParseAccess::control() const {
  return _ctl == nullptr ? _kit->control() : _ctl;
}

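// Decide whether this access must be wrapped in CPU memory barriers.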
bool C2Access::needs_cpu_membar() const {
  bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;

  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap = (_decorators & IN_HEAP) != 0;
  bool in_native = (_decorators & IN_NATIVE) != 0;
  bool is_mixed = !in_heap && !in_native;

  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique

// ...

Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;

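  // A load with an unknown control dependency must stay pinned below the
  // checks that guard it, so it is built with LoadNode::UnknownControl.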
  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;

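  // Parse-time accesses go through the GraphKit; optimizer-time accesses
  // build the load directly against the supplied GVN and memory state.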
  Node* load;
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    Node* control = control_dependent ? parse_access.control() : nullptr;

    if (immutable) {
      Node* mem = kit->immutable_memory();
      load = LoadNode::make(kit->gvn(), control, mem, adr,
                            adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
                            unaligned, mismatched, unsafe, access.barrier_data());
      load = kit->gvn().transform(load);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), mo,
                            dep, requires_atomic_access, unaligned, mismatched, unsafe,
                            access.barrier_data());
    }
  } else {
    assert(access.is_opt_access(), "either parse or opt access");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    Node* control = control_dependent ? opt_access.ctl() : nullptr;
    MergeMemNode* mm = opt_access.mem();
    PhaseGVN& gvn = opt_access.gvn();
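    // Select the memory state for this address's alias category out of the
    // MergeMem so the load is wired to the right slice.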
    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

// ...

void BarrierSetC2::clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
                                    address clone_addr, const char* clone_name) const {
  Node* const ctrl = ac->in(TypeFunc::Control);
  Node* const mem  = ac->in(TypeFunc::Memory);
  Node* const src  = ac->in(ArrayCopyNode::Src);
  Node* const dst  = ac->in(ArrayCopyNode::Dest);
  Node* const size = ac->in(ArrayCopyNode::Length);

  assert(size->bottom_type()->base() == Type_X,
         "Should be of object size type (int for 32 bits, long for 64 bits)");

  // The native clone we are calling here expects the object size in words.
  // Add the header/offset size to the payload size to get the object size.
  Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
  Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
  // HeapAccess<>::clone expects the size in heap words. On 64-bit platforms
  // this shift is a no-op; on 32-bit platforms it multiplies full_size by
  // HeapWordsPerLong (2).
  Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));

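  // Pass the whole object (src, dst, size) to the runtime clone entry point
  // named by clone_addr/clone_name.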
  Node* const call = phase->make_leaf_call(ctrl,
                                           mem,
                                           clone_type(),
                                           clone_addr,
                                           clone_name,
                                           TypeRawPtr::BOTTOM,
                                           src, dst, full_size_in_heap_words XTOP);
  phase->transform_later(call);
  phase->replace_node(ac, call);
}

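// Default clone expansion: copy the object payload with a raw word-copying
// arraycopy stub. Barrier sets that need extra clone work can override this.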
void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

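  // Compute the raw base addresses of the source and destination payloads.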
  Node* payload_src = phase->basic_plus_adr(src, src_offset);
  Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);

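  // Fetch the disjoint jlong arraycopy stub: in this default expansion the
  // payload is copied as raw words, with no per-element GC barriers.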
  const char* copyfunc_name = "arraycopy";
  address copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

  Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
  phase->transform_later(call);

  phase->replace_node(ac, call);
}

#undef XTOP

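// Compute the registers live at each GC barrier stub with an iterative
// backward liveness pass over the CFG, so the stubs only save and restore
// registers that are actually live.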
void BarrierSetC2::compute_liveness_at_stubs() const {
  ResourceMark rm;
  Compile* const C = Compile::current();
  Arena* const A = Thread::current()->resource_area();
  PhaseCFG* const cfg = C->cfg();
  PhaseRegAlloc* const regalloc = C->regalloc();
  RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks());
  BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
  BarrierSetC2State* bs_state = barrier_set_state();
  Block_List worklist;

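  // Give every block an empty live set and seed the worklist with all
  // blocks; the worklist then drives the dataflow to a fixed point.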
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    new ((void*)(live + i)) RegMask();
    worklist.push(cfg->get_block(i));
  }