#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/barrierSetAssembler)

// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2ParseAccess::barrier_set_state() const {
  return _kit->barrier_set_state();
}

PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }

Node* C2ParseAccess::control() const {
  return _ctl == nullptr ? _kit->control() : _ctl;
}

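// Decide whether this access needs to be wrapped in CPU membars by decoding
// the relevant decorator bits up front. Atomic accesses (tagged as both read
// and write) always get membars; anonymous unsafe accesses are examined
// further.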
bool C2Access::needs_cpu_membar() const {
  bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;

  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap = (_decorators & IN_HEAP) != 0;
  bool in_native = (_decorators & IN_NATIVE) != 0;
  bool is_mixed = !in_heap && !in_native;

  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique

// ...
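// The excerpt resumes inside the default load lowering (this appears to be
// BarrierSetC2::load_at_resolved). It first decodes the decorators that
// control how the LoadNode is built.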
  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;

  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;

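  // A parse-time access goes through the GraphKit, which tracks the current
  // memory and control state; an optimization-time access instead wires
  // memory and control explicitly from the C2OptAccess.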
  Node* load;
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    Node* control = control_dependent ? parse_access.control() : nullptr;

    if (immutable) {
      Compile* C = Compile::current();
      Node* mem = kit->immutable_memory();
      load = LoadNode::make(kit->gvn(), control, mem, adr,
                            adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
                            unaligned, mismatched, unsafe, access.barrier_data());
      load = kit->gvn().transform(load);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), mo,
                            dep, requires_atomic_access, unaligned, mismatched, unsafe,
                            access.barrier_data());
    }
  } else {
    assert(access.is_opt_access(), "either parse or opt access");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    Node* control = control_dependent ? opt_access.ctl() : nullptr;
    MergeMemNode* mm = opt_access.mem();
    PhaseGVN& gvn = opt_access.gvn();
    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

// ...
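// The excerpt resumes inside the runtime-call clone expansion (this appears
// to be BarrierSetC2::clone_in_runtime): compute the full object size in
// heap words and emit a leaf call to the native clone stub.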
  assert(size->bottom_type()->base() == Type_X,
         "Should be of object size type (int for 32 bits, long for 64 bits)");

  // The native clone we are calling here expects the object size in words.
  // Add the header/offset size to the payload size to get the object size.
  Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
  Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
  // HeapAccess<>::clone expects the size in heap words.
  // On 64-bit platforms this is a no-op.
  // On 32-bit platforms we need to multiply full_size by HeapWordsPerLong (2).
  Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
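  // For example: on 64-bit platforms HeapWordSize is 8, so HeapWordsPerLong
  // is 1 and LogHeapWordsPerLong is 0, leaving full_size unchanged; on
  // 32-bit platforms HeapWordSize is 4, so the shift by 1 doubles the count.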

  Node* const call = phase->make_leaf_call(ctrl,
                                           mem,
                                           clone_type(),
                                           clone_addr,
                                           clone_name,
                                           TypeRawPtr::BOTTOM,
                                           src, dst, full_size_in_heap_words XTOP);
  phase->transform_later(call);
  phase->replace_node(ac, call);
}

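// Default expansion of a clone ArrayCopyNode: copy the payload as one run of
// jlongs using the raw arraycopy stub. Barrier sets that need to intercept
// the copy can override this.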
void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* payload_src = phase->basic_plus_adr(src, src_offset);
  Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);

  const char* copyfunc_name = "arraycopy";
  address copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

  Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
  phase->transform_later(call);

  phase->replace_node(ac, call);
}

#undef XTOP

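// Scan the scheduled nodes of a block in the half-open range [from, to) for
// a machine-level safepoint.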
static bool block_has_safepoint(const Block* block, uint from, uint to) {
  for (uint i = from; i < to; i++) {
    if (block->get_node(i)->is_MachSafePoint()) {
      // Safepoint found
      return true;
    }
  }

  // Safepoint not found
  return false;
}

static bool block_has_safepoint(const Block* block) {
  return block_has_safepoint(block, 0, block->number_of_nodes());
}
