src/hotspot/share/gc/shared/c2/barrierSetC2.cpp

 26 #include "gc/shared/tlab_globals.hpp"
 27 #include "gc/shared/c2/barrierSetC2.hpp"
 28 #include "opto/arraycopynode.hpp"
 29 #include "opto/convertnode.hpp"
 30 #include "opto/graphKit.hpp"
 31 #include "opto/idealKit.hpp"
 32 #include "opto/macro.hpp"
 33 #include "opto/narrowptrnode.hpp"
 34 #include "opto/runtime.hpp"
 35 #include "utilities/macros.hpp"
 36 
 37 // By default this is a no-op.
 38 void BarrierSetC2::resolve_address(C2Access& access) const { }
 39 
 40 void* C2ParseAccess::barrier_set_state() const {
 41   return _kit->barrier_set_state();
 42 }
 43 
 44 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
 45 
 46 bool C2Access::needs_cpu_membar() const {
 47   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
 48   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
 49 
 50   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 51   bool in_heap   = (_decorators & IN_HEAP) != 0;
 52   bool in_native = (_decorators & IN_NATIVE) != 0;
 53   bool is_mixed  = !in_heap && !in_native;
 54 
 55   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
 56   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
 57   bool is_atomic = is_read && is_write;
 58 
 59   if (is_atomic) {
 60     // Atomics always need to be wrapped in CPU membars
 61     return true;
 62   }
 63 
 64   if (anonymous) {
 65     // We will need memory barriers unless we can determine a unique
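The atomic test above relies on a decorator convention: a read-modify-write access (CAS, swap, fetch-and-add) carries both the read and the write bit, while plain loads and stores carry only one of them. A self-contained sketch of that convention, with made-up bit values standing in for the real DecoratorSet constants from oops/accessDecorators.hpp:

    #include <cstdint>

    // Stand-in decorator bits; HotSpot's real values live in
    // oops/accessDecorators.hpp and differ from these.
    typedef uint64_t DecoratorSet;
    static const DecoratorSet C2_READ_ACCESS  = 1ULL << 0;
    static const DecoratorSet C2_WRITE_ACCESS = 1ULL << 1;

    // An atomic RMW access carries both bits, which is exactly what
    // needs_cpu_membar() keys on before forcing CPU membars around it.
    static bool is_atomic(DecoratorSet decorators) {
      bool is_read  = (decorators & C2_READ_ACCESS)  != 0;
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      return is_read && is_write;
    }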

136   Node* adr = access.addr().node();
137   const TypePtr* adr_type = access.addr().type();
138 
139   bool mismatched = (decorators & C2_MISMATCHED) != 0;
140   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
141   bool unaligned = (decorators & C2_UNALIGNED) != 0;
142   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
143   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
144   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
145   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
146 
147   bool in_native = (decorators & IN_NATIVE) != 0;
148 
149   MemNode::MemOrd mo = access.mem_node_mo();
150   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
151 
152   Node* load;
153   if (access.is_parse_access()) {
154     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
155     GraphKit* kit = parse_access.kit();
156     Node* control = control_dependent ? kit->control() : NULL;
157 
158     if (immutable) {
159       assert(!requires_atomic_access, "can't ensure atomicity");
160       Compile* C = Compile::current();
161       Node* mem = kit->immutable_memory();
162       load = LoadNode::make(kit->gvn(), control, mem, adr,
163                             adr_type, val_type, access.type(), mo, dep, unaligned,
164                             mismatched, unsafe, access.barrier_data());
165       load = kit->gvn().transform(load);
166     } else {
167       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
168                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
169                             access.barrier_data());
170     }
171   } else {
172     assert(!requires_atomic_access, "not yet supported");
173     assert(access.is_opt_access(), "either parse or opt access");
174     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
175     Node* control = control_dependent ? opt_access.ctl() : NULL;
176     MergeMemNode* mm = opt_access.mem();

667   if (base_off % BytesPerLong != 0) {
668     assert(UseCompressedClassPointers, "");
669     if (is_array) {
670       // Exclude the length field so the copy works in 8-byte words.
671       base_off += sizeof(int);
672     } else {
673       // Include the klass word so the copy works in 8-byte words.
674       base_off = instanceOopDesc::klass_offset_in_bytes();
675     }
676     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
677   }
678   return base_off;
679 }
680 
681 void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
682   int base_off = arraycopy_payload_base_offset(is_array);
683   Node* payload_size = size;
684   Node* offset = kit->MakeConX(base_off);
685   payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
686   payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
687   ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset,  dst_base, offset, payload_size, true, false);
688   if (is_array) {
689     ac->set_clone_array();
690   } else {
691     ac->set_clone_inst();
692   }
693   Node* n = kit->gvn().transform(ac);
694   if (n == ac) {
695     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
696     ac->set_adr_type(TypeRawPtr::BOTTOM);
697     kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
698   } else {
699     kit->set_all_memory(n);
700   }
701 }
702 
703 Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
704                                  Node*& i_o, Node*& needgc_ctrl,
705                                  Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
706                                  intx prefetch_lines) const {
707 

833   Node* ctrl = ac->in(TypeFunc::Control);
834   Node* mem = ac->in(TypeFunc::Memory);
835   Node* src = ac->in(ArrayCopyNode::Src);
836   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
837   Node* dest = ac->in(ArrayCopyNode::Dest);
838   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
839   Node* length = ac->in(ArrayCopyNode::Length);
840 
841   Node* payload_src = phase->basic_plus_adr(src, src_offset);
842   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
843 
844   const char* copyfunc_name = "arraycopy";
845   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, NULL, NULL, true, copyfunc_name, true);
846 
847   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
848   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
849 
850   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
851   phase->transform_later(call);
852 
853   phase->igvn().replace_node(ac, call);
854 }
855 
856 #undef XTOP

 26 #include "gc/shared/tlab_globals.hpp"
 27 #include "gc/shared/c2/barrierSetC2.hpp"
 28 #include "opto/arraycopynode.hpp"
 29 #include "opto/convertnode.hpp"
 30 #include "opto/graphKit.hpp"
 31 #include "opto/idealKit.hpp"
 32 #include "opto/macro.hpp"
 33 #include "opto/narrowptrnode.hpp"
 34 #include "opto/runtime.hpp"
 35 #include "utilities/macros.hpp"
 36 
 37 // By default this is a no-op.
 38 void BarrierSetC2::resolve_address(C2Access& access) const { }
 39 
 40 void* C2ParseAccess::barrier_set_state() const {
 41   return _kit->barrier_set_state();
 42 }
 43 
 44 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
 45 
 46 Node* C2ParseAccess::control() const {
 47   return _ctl == NULL ? _kit->control() : _ctl;
 48 }
 49 
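The new control() accessor lets a parse-time access carry an explicit control node and fall back to the GraphKit's current control when none was supplied. A self-contained sketch of the fallback pattern (Kit and ParseAccess are stand-ins here, not the real GraphKit/C2ParseAccess declarations):

    #include <cstddef>

    struct Node;                          // opaque IR node, as in opto

    struct Kit {
      Node* _control;
      Node* control() const { return _control; }
    };

    struct ParseAccess {
      Kit*  _kit;
      Node* _ctl;                         // NULL means "use the kit's control"

      // Mirrors C2ParseAccess::control(): an explicit control wins,
      // otherwise defer to whatever the kit is currently parsing under.
      Node* control() const {
        return _ctl == NULL ? _kit->control() : _ctl;
      }
    };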
 50 bool C2Access::needs_cpu_membar() const {
 51   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
 52   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
 53 
 54   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 55   bool in_heap   = (_decorators & IN_HEAP) != 0;
 56   bool in_native = (_decorators & IN_NATIVE) != 0;
 57   bool is_mixed  = !in_heap && !in_native;
 58 
 59   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
 60   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
 61   bool is_atomic = is_read && is_write;
 62 
 63   if (is_atomic) {
 64     // Atomics always need to be wrapped in CPU membars
 65     return true;
 66   }
 67 
 68   if (anonymous) {
 69     // We will need memory barriers unless we can determine a unique

140   Node* adr = access.addr().node();
141   const TypePtr* adr_type = access.addr().type();
142 
143   bool mismatched = (decorators & C2_MISMATCHED) != 0;
144   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
145   bool unaligned = (decorators & C2_UNALIGNED) != 0;
146   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
147   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
148   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
149   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
150 
151   bool in_native = (decorators & IN_NATIVE) != 0;
152 
153   MemNode::MemOrd mo = access.mem_node_mo();
154   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
155 
156   Node* load;
157   if (access.is_parse_access()) {
158     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
159     GraphKit* kit = parse_access.kit();
160     Node* control = control_dependent ? parse_access.control() : NULL;
161 
162     if (immutable) {
163       assert(!requires_atomic_access, "can't ensure atomicity");
164       Compile* C = Compile::current();
165       Node* mem = kit->immutable_memory();
166       load = LoadNode::make(kit->gvn(), control, mem, adr,
167                             adr_type, val_type, access.type(), mo, dep, unaligned,
168                             mismatched, unsafe, access.barrier_data());
169       load = kit->gvn().transform(load);
170     } else {
171       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
172                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
173                             access.barrier_data());
174     }
175   } else {
176     assert(!requires_atomic_access, "not yet supported");
177     assert(access.is_opt_access(), "either parse or opt access");
178     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
179     Node* control = control_dependent ? opt_access.ctl() : NULL;
180     MergeMemNode* mm = opt_access.mem();
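Two decorators steer how far the optimizer may move the load: C2_CONTROL_DEPENDENT_LOAD pins it under the control supplied by the access, and C2_UNKNOWN_CONTROL_LOAD additionally marks it LoadNode::UnknownControl, telling the optimizer the load's control dependency is not just a test it could hoist past. A condensed, self-contained model of that mapping (the enum names mirror the real ones; the bit values and the LoadPlan type are invented):

    #include <cstddef>
    #include <cstdint>

    typedef uint64_t DecoratorSet;
    static const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = 1ULL << 2; // stand-in bit
    static const DecoratorSet C2_UNKNOWN_CONTROL_LOAD   = 1ULL << 3; // stand-in bit

    struct Node;
    enum ControlDependency { DependsOnlyOnTest, UnknownControl };

    struct LoadPlan {
      Node* control;            // NULL: the load may float freely
      ControlDependency dep;    // how strongly it stays pinned
    };

    static LoadPlan plan_load(DecoratorSet d, Node* access_control) {
      LoadPlan plan;
      plan.control = (d & C2_CONTROL_DEPENDENT_LOAD) != 0 ? access_control : NULL;
      plan.dep     = (d & C2_UNKNOWN_CONTROL_LOAD)   != 0 ? UnknownControl
                                                          : DependsOnlyOnTest;
      return plan;
    }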

671   if (base_off % BytesPerLong != 0) {
672     assert(UseCompressedClassPointers, "");
673     if (is_array) {
674       // Exclude the length field so the copy works in 8-byte words.
675       base_off += sizeof(int);
676     } else {
677       // Include the klass word so the copy works in 8-byte words.
678       base_off = instanceOopDesc::klass_offset_in_bytes();
679     }
680     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
681   }
682   return base_off;
683 }
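With compressed class pointers on 64-bit the arithmetic works out as follows, assuming the usual header layout (8-byte mark word at offset 0, 4-byte narrow klass at offset 8, array length or first instance field at offset 12): an array's copy base starts at the misaligned length offset 12 and moves forward to 16, excluding the length; an instance's copy base moves back to the klass offset 8, harmlessly re-copying the klass word. A self-contained rendering of that computation (the offsets are assumptions of this sketch; HotSpot queries arrayOopDesc/instanceOopDesc instead):

    #include <cassert>

    // Header offsets assumed for 64-bit with UseCompressedClassPointers.
    static const int BytesPerLong  = 8;
    static const int klass_offset  = 8;   // narrow klass word
    static const int length_offset = 12;  // array length field
    static const int instance_base = 12;  // first instance field

    static int payload_base(bool is_array) {
      int base_off = is_array ? length_offset : instance_base;
      if (base_off % BytesPerLong != 0) {
        if (is_array) {
          base_off += (int)sizeof(int);  // skip the 4-byte length: 12 -> 16
        } else {
          base_off = klass_offset;       // re-copy the klass word: 12 -> 8
        }
      }
      assert(base_off % BytesPerLong == 0);
      return base_off;
    }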
684 
685 void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
686   int base_off = arraycopy_payload_base_offset(is_array);
687   Node* payload_size = size;
688   Node* offset = kit->MakeConX(base_off);
689   payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
690   payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
691   ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
692   if (is_array) {
693     ac->set_clone_array();
694   } else {
695     ac->set_clone_inst();
696   }
697   Node* n = kit->gvn().transform(ac);
698   if (n == ac) {
699     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
700     ac->set_adr_type(TypeRawPtr::BOTTOM);
701     kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
702   } else {
703     kit->set_all_memory(n);
704   }
705 }
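The payload size handed to the ArrayCopyNode is the total object size minus the uncopied header prefix, converted from bytes to 8-byte words by the unsigned shift; for example, cloning a 32-byte instance whose payload starts at offset 8 copies (32 - 8) >> 3 = 3 words. A minimal standalone version of the SubX/URShiftX arithmetic built above:

    static const int LogBytesPerLong = 3;

    // size_in_bytes and base_off are in bytes; the result counts
    // 8-byte words, mirroring the SubX/URShiftX pair in clone().
    static unsigned long payload_words(unsigned long size_in_bytes, int base_off) {
      return (size_in_bytes - (unsigned long)base_off) >> LogBytesPerLong;
    }

    // Example: payload_words(32, 8) == 3.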
706 
707 Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
708                                  Node*& i_o, Node*& needgc_ctrl,
709                                  Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
710                                  intx prefetch_lines) const {
711 
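obj_allocate hands back the fast-path oop as its return value and publishes the companion control, i/o and raw-memory state through the Node*& reference parameters, the usual HotSpot idiom for a function that must produce several graph outputs at once. A stand-in illustration of that convention (all names invented):

    #include <cstddef>

    struct Node;

    // One primary result plus reference out-parameters, mirroring the
    // Node*& signature style of obj_allocate above.
    static Node* allocate_sketch(Node*& fast_ctrl_out, Node*& fast_rawmem_out) {
      Node* fast_oop  = NULL;   // the freshly allocated object (stand-in)
      fast_ctrl_out   = NULL;   // control on the fast allocation path
      fast_rawmem_out = NULL;   // raw memory state after header init
      return fast_oop;
    }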

837   Node* ctrl = ac->in(TypeFunc::Control);
838   Node* mem = ac->in(TypeFunc::Memory);
839   Node* src = ac->in(ArrayCopyNode::Src);
840   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
841   Node* dest = ac->in(ArrayCopyNode::Dest);
842   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
843   Node* length = ac->in(ArrayCopyNode::Length);
844 
845   Node* payload_src = phase->basic_plus_adr(src, src_offset);
846   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
847 
848   const char* copyfunc_name = "arraycopy";
849   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, NULL, NULL, true, copyfunc_name, true);
850 
851   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
852   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
853 
854   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
855   phase->transform_later(call);
856 
857   phase->replace_node(ac, call);
858 }
859 
860 #undef XTOP
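A note on XTOP, undefined above: the stub's length argument has machine-word type, and on LP64 a 64-bit value occupies two adjacent inputs in a C2 call, the second fed with the compiler's top() node. XTOP expands to that extra argument on 64-bit and to nothing on 32-bit; earlier in this file it is defined roughly as LP64_ONLY(COMMA phase->top()). A conceptual model of the two-slot convention (all names invented):

    #include <cstddef>
    #include <vector>

    struct Node;
    static Node* const TOP = NULL;   // stand-in for Compile::current()->top()

    // A 64-bit argument contributes two slots on LP64, the high half
    // represented by top(); 32-bit targets pass a single slot.
    static void add_word_argument(std::vector<Node*>& args, Node* value) {
      args.push_back(value);
    #ifdef _LP64
      args.push_back(TOP);
    #endif
    }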