
src/hotspot/share/gc/shared/c2/barrierSetC2.cpp


Old version:

#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"

// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2ParseAccess::barrier_set_state() const {
  return _kit->barrier_set_state();
}

PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }

bool C2Access::needs_cpu_membar() const {
  bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;

  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap   = (_decorators & IN_HEAP) != 0;
  bool in_native = (_decorators & IN_NATIVE) != 0;
  bool is_mixed  = !in_heap && !in_native;

  bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique

...

  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;

  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;

  Node* load;
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    Node* control = control_dependent ? kit->control() : nullptr;

    if (immutable) {
      Compile* C = Compile::current();
      Node* mem = kit->immutable_memory();
      load = LoadNode::make(kit->gvn(), control, mem, adr,
                            adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
                            unaligned, mismatched, unsafe, access.barrier_data());
      load = kit->gvn().transform(load);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                            dep, requires_atomic_access, unaligned, mismatched, unsafe,
                            access.barrier_data());
    }
  } else {
    assert(access.is_opt_access(), "either parse or opt access");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    Node* control = control_dependent ? opt_access.ctl() : nullptr;
    MergeMemNode* mm = opt_access.mem();
    PhaseGVN& gvn = opt_access.gvn();
    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

...

  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* payload_src = phase->basic_plus_adr(src, src_offset);
  Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);

  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

  Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
  phase->transform_later(call);

  phase->igvn().replace_node(ac, call);
}

#undef XTOP

New version:

#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"

// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2ParseAccess::barrier_set_state() const {
  return _kit->barrier_set_state();
}

PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }

Node* C2ParseAccess::control() const {
  return _ctl == nullptr ? _kit->control() : _ctl;
}

bool C2Access::needs_cpu_membar() const {
  bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;

  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap   = (_decorators & IN_HEAP) != 0;
  bool in_native = (_decorators & IN_NATIVE) != 0;
  bool is_mixed  = !in_heap && !in_native;

  bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique

...
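As an aside for readers new to the decorator scheme: the sketch below is a standalone model of the classification done in needs_cpu_membar() above. The bit values are made up for illustration and do not match HotSpot's real definitions, and only the atomic shortcut from the excerpt is reproduced (the anonymous/mixed handling is elided here as it is in the excerpt).

// Standalone model of the decorator test in needs_cpu_membar() above.
// The bit values are mock values, not HotSpot's accessDecorators.hpp ones.
#include <cstdint>
#include <iostream>

using DecoratorSet = uint64_t;
const DecoratorSet MO_UNORDERED    = DecoratorSet(1) << 0;
const DecoratorSet IN_HEAP         = DecoratorSet(1) << 1;
const DecoratorSet C2_READ_ACCESS  = DecoratorSet(1) << 2;
const DecoratorSet C2_WRITE_ACCESS = DecoratorSet(1) << 3;

// An access that is both a read and a write (a read-modify-write atomic)
// always needs to be wrapped in CPU membars.
bool needs_cpu_membar(DecoratorSet d) {
  bool is_read  = (d & C2_READ_ACCESS) != 0;
  bool is_write = (d & C2_WRITE_ACCESS) != 0;
  return is_read && is_write;
}

int main() {
  DecoratorSet cas_like   = IN_HEAP | C2_READ_ACCESS | C2_WRITE_ACCESS;
  DecoratorSet plain_load = IN_HEAP | C2_READ_ACCESS | MO_UNORDERED;
  std::cout << needs_cpu_membar(cas_like)   << "\n";  // prints 1
  std::cout << needs_cpu_membar(plain_load) << "\n";  // prints 0
}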

  DecoratorSet decorators = access.decorators();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;

  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;

  Node* load;
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
    GraphKit* kit = parse_access.kit();
    Node* control = control_dependent ? parse_access.control() : nullptr;

    if (immutable) {
      Compile* C = Compile::current();
      Node* mem = kit->immutable_memory();
      load = LoadNode::make(kit->gvn(), control, mem, adr,
                            adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
                            unaligned, mismatched, unsafe, access.barrier_data());
      load = kit->gvn().transform(load);
    } else {
      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                            dep, requires_atomic_access, unaligned, mismatched, unsafe,
                            access.barrier_data());
    }
  } else {
    assert(access.is_opt_access(), "either parse or opt access");
    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
    Node* control = control_dependent ? opt_access.ctl() : nullptr;
    MergeMemNode* mm = opt_access.mem();
    PhaseGVN& gvn = opt_access.gvn();
    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

...
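The behavioral change in this hunk is where a control-dependent load gets its control input: instead of always taking kit->control(), the code now asks parse_access.control(), which prefers an explicitly supplied _ctl node and falls back to the kit's current control only when none was given. A toy model of that fallback follows; Node, GraphKit, and C2ParseAccess here are simplified stand-ins, not the real C2 classes.

// Toy model of the C2ParseAccess::control() fallback added by this change.
#include <cstdio>

struct Node { const char* name; };

struct GraphKit {
  Node* _current;
  Node* control() { return _current; }  // parser's current control
};

struct C2ParseAccess {
  GraphKit* _kit;
  Node*     _ctl;  // null means "use the kit's current control"

  // Same shape as the accessor in the patch: an explicitly supplied
  // control node wins; otherwise defer to the kit.
  Node* control() const {
    return _ctl == nullptr ? _kit->control() : _ctl;
  }
};

int main() {
  Node parse_point{"parse_point"};
  Node pinned{"pinned"};
  GraphKit kit{&parse_point};

  C2ParseAccess deferred{&kit, nullptr};      // old behavior: kit->control()
  C2ParseAccess explicit_ctl{&kit, &pinned};  // new: caller-chosen control

  std::printf("%s\n", deferred.control()->name);      // parse_point
  std::printf("%s\n", explicit_ctl.control()->name);  // pinned
}

Presumably this lets a caller pin a load to a specific control edge rather than the current parse point; the diff itself only shows the mechanism, not a use site.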

  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  Node* payload_src = phase->basic_plus_adr(src, src_offset);
  Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);

  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

  Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
  phase->transform_later(call);

  phase->replace_node(ac, call);
}

#undef XTOP
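For intuition, the leaf call constructed above hands payload_src, payload_dst, and length to a word-copy stub selected via basictype2arraycopy(T_LONG, ...). Below is a rough functional model of the copy it performs, assuming byte offsets and a length counted in 64-bit words; this is an illustration only, not HotSpot's actual stub.

// Rough model of the fast-arraycopy leaf call: copy 'length' 64-bit
// words from (src + src_offset) to (dest + dest_offset).
// Offset/length units are assumptions made for this sketch.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

static void fast_arraycopy(void* src, std::size_t src_offset,
                           void* dest, std::size_t dest_offset,
                           std::size_t length_in_words) {
  // Analogous to basic_plus_adr(src, src_offset) / basic_plus_adr(dest, dest_offset):
  char* payload_src = static_cast<char*>(src) + src_offset;
  char* payload_dst = static_cast<char*>(dest) + dest_offset;
  std::memmove(payload_dst, payload_src, length_in_words * sizeof(int64_t));
}

int main() {
  int64_t a[4] = {1, 2, 3, 4};
  int64_t b[4] = {0, 0, 0, 0};
  fast_arraycopy(a, 0, b, 0, 4);
  std::printf("%lld %lld\n", (long long)b[0], (long long)b[3]);  // 1 4
}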