
src/hotspot/share/gc/shared/c2/barrierSetC2.cpp


Old version:

 32 #include "opto/convertnode.hpp"
 33 #include "opto/graphKit.hpp"
 34 #include "opto/idealKit.hpp"
 35 #include "opto/macro.hpp"
 36 #include "opto/narrowptrnode.hpp"
 37 #include "opto/output.hpp"
 38 #include "opto/regalloc.hpp"
 39 #include "opto/runtime.hpp"
 40 #include "utilities/macros.hpp"
 41 #include CPU_HEADER(gc/shared/barrierSetAssembler)
 42 
 43 // By default this is a no-op.
 44 void BarrierSetC2::resolve_address(C2Access& access) const { }
 45 
 46 void* C2ParseAccess::barrier_set_state() const {
 47   return _kit->barrier_set_state();
 48 }
 49 
 50 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
 51 
 52 bool C2Access::needs_cpu_membar() const {
 53   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
 54   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
 55 
 56   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 57   bool in_heap   = (_decorators & IN_HEAP) != 0;
 58   bool in_native = (_decorators & IN_NATIVE) != 0;
 59   bool is_mixed  = !in_heap && !in_native;
 60 
 61   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
 62   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
 63   bool is_atomic = is_read && is_write;
 64 
 65   if (is_atomic) {
 66     // Atomics always need to be wrapped in CPU membars
 67     return true;
 68   }
 69 
 70   if (anonymous) {
 71     // We will need memory barriers unless we can determine a unique

186   DecoratorSet decorators = access.decorators();
187 
188   Node* adr = access.addr().node();
189   const TypePtr* adr_type = access.addr().type();
190 
191   bool mismatched = (decorators & C2_MISMATCHED) != 0;
192   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
193   bool unaligned = (decorators & C2_UNALIGNED) != 0;
194   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
195   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
196   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
197   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
198 
199   MemNode::MemOrd mo = access.mem_node_mo();
200   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
201 
202   Node* load;
203   if (access.is_parse_access()) {
204     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
205     GraphKit* kit = parse_access.kit();
206     Node* control = control_dependent ? kit->control() : nullptr;
207 
208     if (immutable) {
209       Compile* C = Compile::current();
210       Node* mem = kit->immutable_memory();
211       load = LoadNode::make(kit->gvn(), control, mem, adr,
212                             adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
213                             unaligned, mismatched, unsafe, access.barrier_data());
214       load = kit->gvn().transform(load);
215     } else {
216       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
217                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
218                             access.barrier_data());
219     }
220   } else {
221     assert(access.is_opt_access(), "either parse or opt access");
222     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
223     Node* control = control_dependent ? opt_access.ctl() : nullptr;
224     MergeMemNode* mm = opt_access.mem();
225     PhaseGVN& gvn = opt_access.gvn();
226     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

846   assert(size->bottom_type()->base() == Type_X,
847          "Should be of object size type (int for 32 bits, long for 64 bits)");
848 
849   // The native clone we are calling here expects the object size in words.
850   // Add header/offset size to payload size to get object size.
851   Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
852   Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
853   // HeapAccess<>::clone expects size in heap words.
854   // For 64-bit platforms, this is a no-op.
855   // For 32-bit platforms, we need to multiply full_size by HeapWordsPerLong (2).
856   Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
857 
858   Node* const call = phase->make_leaf_call(ctrl,
859                                            mem,
860                                            clone_type(),
861                                            clone_addr,
862                                            clone_name,
863                                            TypeRawPtr::BOTTOM,
864                                            src, dst, full_size_in_heap_words XTOP);
865   phase->transform_later(call);
866   phase->igvn().replace_node(ac, call);
867 }
868 
869 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
870   Node* ctrl = ac->in(TypeFunc::Control);
871   Node* mem = ac->in(TypeFunc::Memory);
872   Node* src = ac->in(ArrayCopyNode::Src);
873   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
874   Node* dest = ac->in(ArrayCopyNode::Dest);
875   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
876   Node* length = ac->in(ArrayCopyNode::Length);
877 
878   Node* payload_src = phase->basic_plus_adr(src, src_offset);
879   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
880 
881   const char* copyfunc_name = "arraycopy";
882   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);
883 
884   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
885   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
886 
887   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
888   phase->transform_later(call);
889 
890   phase->igvn().replace_node(ac, call);
891 }
892 
893 #undef XTOP
894 
895 void BarrierSetC2::compute_liveness_at_stubs() const {
896   ResourceMark rm;
897   Compile* const C = Compile::current();
898   Arena* const A = Thread::current()->resource_area();
899   PhaseCFG* const cfg = C->cfg();
900   PhaseRegAlloc* const regalloc = C->regalloc();
901   RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
902   BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
903   BarrierSetC2State* bs_state = barrier_set_state();
904   Block_List worklist;
905 
906   for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
907     new ((void*)(live + i)) RegMask();
908     worklist.push(cfg->get_block(i));
909   }
910 

New version:

 32 #include "opto/convertnode.hpp"
 33 #include "opto/graphKit.hpp"
 34 #include "opto/idealKit.hpp"
 35 #include "opto/macro.hpp"
 36 #include "opto/narrowptrnode.hpp"
 37 #include "opto/output.hpp"
 38 #include "opto/regalloc.hpp"
 39 #include "opto/runtime.hpp"
 40 #include "utilities/macros.hpp"
 41 #include CPU_HEADER(gc/shared/barrierSetAssembler)
 42 
 43 // By default this is a no-op.
 44 void BarrierSetC2::resolve_address(C2Access& access) const { }
 45 
 46 void* C2ParseAccess::barrier_set_state() const {
 47   return _kit->barrier_set_state();
 48 }
 49 
 50 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
 51 
 52 Node* C2ParseAccess::control() const {
 53   return _ctl == nullptr ? _kit->control() : _ctl;
 54 }
 55 
 56 bool C2Access::needs_cpu_membar() const {
 57   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
 58   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
 59 
 60   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 61   bool in_heap   = (_decorators & IN_HEAP) != 0;
 62   bool in_native = (_decorators & IN_NATIVE) != 0;
 63   bool is_mixed  = !in_heap && !in_native;
 64 
 65   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
 66   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
 67   bool is_atomic = is_read && is_write;
 68 
 69   if (is_atomic) {
 70     // Atomics always need to be wrapped in CPU membars
 71     return true;
 72   }
 73 
 74   if (anonymous) {
 75     // We will need memory barriers unless we can determine a unique

190   DecoratorSet decorators = access.decorators();
191 
192   Node* adr = access.addr().node();
193   const TypePtr* adr_type = access.addr().type();
194 
195   bool mismatched = (decorators & C2_MISMATCHED) != 0;
196   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
197   bool unaligned = (decorators & C2_UNALIGNED) != 0;
198   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
199   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
200   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
201   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
202 
203   MemNode::MemOrd mo = access.mem_node_mo();
204   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
205 
206   Node* load;
207   if (access.is_parse_access()) {
208     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
209     GraphKit* kit = parse_access.kit();
210     Node* control = control_dependent ? parse_access.control() : nullptr;
211 
212     if (immutable) {
213       Compile* C = Compile::current();
214       Node* mem = kit->immutable_memory();
215       load = LoadNode::make(kit->gvn(), control, mem, adr,
216                             adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
217                             unaligned, mismatched, unsafe, access.barrier_data());
218       load = kit->gvn().transform(load);
219     } else {
220       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
221                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
222                             access.barrier_data());
223     }
224   } else {
225     assert(access.is_opt_access(), "either parse or opt access");
226     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
227     Node* control = control_dependent ? opt_access.ctl() : nullptr;
228     MergeMemNode* mm = opt_access.mem();
229     PhaseGVN& gvn = opt_access.gvn();
230     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

850   assert(size->bottom_type()->base() == Type_X,
851          "Should be of object size type (int for 32 bits, long for 64 bits)");
852 
853   // The native clone we are calling here expects the object size in words.
854   // Add header/offset size to payload size to get object size.
855   Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
856   Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
857   // HeapAccess<>::clone expects size in heap words.
858   // For 64-bit platforms, this is a no-op.
859   // For 32-bit platforms, we need to multiply full_size by HeapWordsPerLong (2).
860   Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
861 
862   Node* const call = phase->make_leaf_call(ctrl,
863                                            mem,
864                                            clone_type(),
865                                            clone_addr,
866                                            clone_name,
867                                            TypeRawPtr::BOTTOM,
868                                            src, dst, full_size_in_heap_words XTOP);
869   phase->transform_later(call);
870   phase->replace_node(ac, call);
871 }
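
A quick worked example of the size arithmetic above, using the usual HotSpot constants (LogBytesPerLong = 3; LogHeapWordsPerLong = 0 on 64-bit VMs, 1 on 32-bit VMs) and an assumed 16-byte payload base offset chosen purely for illustration:

  base_offset             = 16 >> 3 = 2 long words
  full_size               = payload (say 6 long words) + 2 = 8 long words = 64 bytes
  full_size_in_heap_words = 8 << 0 = 8   on 64-bit (8-byte heap words, the shift is a no-op)
                          = 8 << 1 = 16  on 32-bit (4-byte heap words, the same 64 bytes)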
872 
873 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
874   Node* ctrl = ac->in(TypeFunc::Control);
875   Node* mem = ac->in(TypeFunc::Memory);
876   Node* src = ac->in(ArrayCopyNode::Src);
877   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
878   Node* dest = ac->in(ArrayCopyNode::Dest);
879   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
880   Node* length = ac->in(ArrayCopyNode::Length);
881 
882   Node* payload_src = phase->basic_plus_adr(src, src_offset);
883   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
884 
885   const char* copyfunc_name = "arraycopy";
886   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);
887 
888   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
889   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
890 
891   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
892   phase->transform_later(call);
893 
894   phase->replace_node(ac, call);
895 }
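
For reference, the generic expansion above hands the whole clone payload to the unchecked T_LONG arraycopy stub returned by basictype2arraycopy, so at run time the call amounts to a raw word-for-word copy with no GC barriers; barrier sets that need extra work on clones override clone_at_expansion instead. A minimal sketch of what the stub call boils down to (hypothetical C++ for illustration only, the real stub is generated assembly):

  #include <cstddef>
  #include <cstdint>

  // Illustration: copy n 64-bit words from src to dst, as the unchecked
  // long arraycopy stub effectively does for the clone payload.
  static void clone_payload_copy_sketch(const int64_t* src, int64_t* dst, std::size_t n) {
    for (std::size_t i = 0; i < n; i++) {
      dst[i] = src[i];
    }
  }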
896 
897 #undef XTOP
898 
899 void BarrierSetC2::compute_liveness_at_stubs() const {
900   ResourceMark rm;
901   Compile* const C = Compile::current();
902   Arena* const A = Thread::current()->resource_area();
903   PhaseCFG* const cfg = C->cfg();
904   PhaseRegAlloc* const regalloc = C->regalloc();
905   RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
906   BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
907   BarrierSetC2State* bs_state = barrier_set_state();
908   Block_List worklist;
909 
910   for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
911     new ((void*)(live + i)) RegMask();
912     worklist.push(cfg->get_block(i));
913   }
914 
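
The loop just above only seeds the analysis: every block gets a freshly constructed (empty) RegMask and is pushed onto the worklist. This is the standard setup for a backward liveness fixed point over the machine CFG, which in general form iterates

  live_out(B) = union of live_in(S) over all successors S of B
  live_in(B)  = (live_out(B) - defs(B)) + uses(B)

re-queuing blocks whose sets changed until a fixed point is reached; as the name compute_liveness_at_stubs suggests, the result tells each barrier stub which registers are live, and therefore must be preserved, at its call site.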