< prev index next >

src/hotspot/share/gc/shared/c2/barrierSetC2.cpp

Print this page

  31 #include "opto/convertnode.hpp"
  32 #include "opto/graphKit.hpp"
  33 #include "opto/idealKit.hpp"
  34 #include "opto/macro.hpp"
  35 #include "opto/narrowptrnode.hpp"
  36 #include "opto/output.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "utilities/macros.hpp"
  40 #include CPU_HEADER(gc/shared/barrierSetAssembler)
  41 
  42 // By default this is a no-op.
     // Hook letting a barrier set rewrite/resolve the access address before the
     // access is emitted; the base implementation leaves the address untouched.
  43 void BarrierSetC2::resolve_address(C2Access& access) const { }
  44 
     // Opaque barrier-set state for the current compilation, forwarded from the
     // kit this parse-time access was created with.
  45 void* C2ParseAccess::barrier_set_state() const {
  46   return _kit->barrier_set_state();
  47 }
  48 
     // Value-numbering phase of the underlying kit.
  49 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
  50 




  51 bool C2Access::needs_cpu_membar() const {
  52   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
  53   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  54 
  55   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  56   bool in_heap   = (_decorators & IN_HEAP) != 0;
  57   bool in_native = (_decorators & IN_NATIVE) != 0;
  58   bool is_mixed  = !in_heap && !in_native;
  59 
  60   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
  61   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
  62   bool is_atomic = is_read && is_write;
  63 
  64   if (is_atomic) {
  65     // Atomics always need to be wrapped in CPU membars
  66     return true;
  67   }
  68 
  69   if (anonymous) {
  70     // We will need memory barriers unless we can determine a unique

 184   DecoratorSet decorators = access.decorators();
 185 
 186   Node* adr = access.addr().node();
 187   const TypePtr* adr_type = access.addr().type();
 188 
 189   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 190   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 191   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 192   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 193   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
 194   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 195   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
 196 
 197   MemNode::MemOrd mo = access.mem_node_mo();
 198   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
 199 
 200   Node* load;
 201   if (access.is_parse_access()) {
 202     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 203     GraphKit* kit = parse_access.kit();
 204     Node* control = control_dependent ? kit->control() : nullptr;
 205 
 206     if (immutable) {
 207       Compile* C = Compile::current();
 208       Node* mem = kit->immutable_memory();
 209       load = LoadNode::make(kit->gvn(), control, mem, adr,
 210                             adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
 211                             unaligned, mismatched, unsafe, access.barrier_data());
 212       load = kit->gvn().transform(load);
 213     } else {
 214       load = kit->make_load(control, adr, val_type, access.type(), mo,
 215                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
 216                             access.barrier_data());
 217     }
 218   } else {
 219     assert(access.is_opt_access(), "either parse or opt access");
 220     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 221     Node* control = control_dependent ? opt_access.ctl() : nullptr;
 222     MergeMemNode* mm = opt_access.mem();
 223     PhaseGVN& gvn = opt_access.gvn();
 224     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

 852   assert(size->bottom_type()->base() == Type_X,
 853          "Should be of object size type (int for 32 bits, long for 64 bits)");
 854 
 855   // The native clone we are calling here expects the object size in words.
 856   // Add header/offset size to payload size to get object size.
 857   Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
 858   Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
 859   // HeapAccess<>::clone expects size in heap words.
 860   // For 64-bits platforms, this is a no-operation.
 861   // For 32-bits platforms, we need to multiply full_size by HeapWordsPerLong (2).
 862   Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
 863 
 864   Node* const call = phase->make_leaf_call(ctrl,
 865                                            mem,
 866                                            clone_type(),
 867                                            clone_addr,
 868                                            clone_name,
 869                                            TypeRawPtr::BOTTOM,
 870                                            src, dst, full_size_in_heap_words XTOP);
 871   phase->transform_later(call);
 872   phase->igvn().replace_node(ac, call);
 873 }
 874 
     // Expand a clone (ArrayCopyNode) into a leaf runtime call to the generic
     // arraycopy stub, copying 'length' elements of payload from 'src' to
     // 'dest' starting at the given offsets, then replace the ArrayCopyNode
     // with that call in the graph.
 875 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
     // Pull the control/memory state and copy operands off the ArrayCopyNode.
 876   Node* ctrl = ac->in(TypeFunc::Control);
 877   Node* mem = ac->in(TypeFunc::Memory);
 878   Node* src = ac->in(ArrayCopyNode::Src);
 879   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
 880   Node* dest = ac->in(ArrayCopyNode::Dest);
 881   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
 882   Node* length = ac->in(ArrayCopyNode::Length);
 883 
     // Address of the first payload word on each side (base + offset).
 884   Node* payload_src = phase->basic_plus_adr(src, src_offset);
 885   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
 886 
     // Resolve the T_LONG arraycopy stub (the payload is copied as raw words).
 887   const char* copyfunc_name = "arraycopy";
 888   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);
 889 
 890   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 891   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
 892 
     // NOTE(review): XTOP is a macro (#undef'd just below this function);
     // presumably it supplies the extra top-half argument for the long 'length'
     // on platforms that pass longs in two slots — its definition is not
     // visible in this chunk, confirm against the top of the file.
 893   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
 894   phase->transform_later(call);
 895 
     // Splice the call into the graph in place of the ArrayCopyNode.
 896   phase->igvn().replace_node(ac, call);
 897 }
 898 
 899 #undef XTOP
 900 
     // Returns true iff the block contains a MachSafePoint node whose index
     // lies in the half-open range [from, to).
 901 static bool block_has_safepoint(const Block* block, uint from, uint to) {
 902   for (uint i = from; i < to; i++) {
 903     if (block->get_node(i)->is_MachSafePoint()) {
 904       // Safepoint found
 905       return true;
 906     }
 907   }
 908 
 909   // Safepoint not found
 910   return false;
 911 }
 912 
     // Convenience overload: scan every node of the block.
 913 static bool block_has_safepoint(const Block* block) {
 914   return block_has_safepoint(block, 0, block->number_of_nodes());
 915 }
 916 

  31 #include "opto/convertnode.hpp"
  32 #include "opto/graphKit.hpp"
  33 #include "opto/idealKit.hpp"
  34 #include "opto/macro.hpp"
  35 #include "opto/narrowptrnode.hpp"
  36 #include "opto/output.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "utilities/macros.hpp"
  40 #include CPU_HEADER(gc/shared/barrierSetAssembler)
  41 
  42 // By default this is a no-op.
     // Hook letting a barrier set rewrite/resolve the access address before the
     // access is emitted; the base implementation leaves the address untouched.
  43 void BarrierSetC2::resolve_address(C2Access& access) const { }
  44 
     // Opaque barrier-set state for the current compilation, forwarded from the
     // kit this parse-time access was created with.
  45 void* C2ParseAccess::barrier_set_state() const {
  46   return _kit->barrier_set_state();
  47 }
  48 
     // Value-numbering phase of the underlying kit.
  49 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
  50 
     // Control input for this access: the explicitly supplied _ctl when one was
     // set, otherwise the kit's current control.
  51 Node* C2ParseAccess::control() const {
  52   return _ctl == nullptr ? _kit->control() : _ctl;
  53 }
  54 
  55 bool C2Access::needs_cpu_membar() const {
  56   bool mismatched   = (_decorators & C2_MISMATCHED) != 0;
  57   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  58 
  59   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  60   bool in_heap   = (_decorators & IN_HEAP) != 0;
  61   bool in_native = (_decorators & IN_NATIVE) != 0;
  62   bool is_mixed  = !in_heap && !in_native;
  63 
  64   bool is_write  = (_decorators & C2_WRITE_ACCESS) != 0;
  65   bool is_read   = (_decorators & C2_READ_ACCESS) != 0;
  66   bool is_atomic = is_read && is_write;
  67 
  68   if (is_atomic) {
  69     // Atomics always need to be wrapped in CPU membars
  70     return true;
  71   }
  72 
  73   if (anonymous) {
  74     // We will need memory barriers unless we can determine a unique

 188   DecoratorSet decorators = access.decorators();
 189 
 190   Node* adr = access.addr().node();
 191   const TypePtr* adr_type = access.addr().type();
 192 
 193   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 194   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 195   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 196   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 197   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
 198   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 199   bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
 200 
 201   MemNode::MemOrd mo = access.mem_node_mo();
 202   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
 203 
 204   Node* load;
 205   if (access.is_parse_access()) {
 206     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 207     GraphKit* kit = parse_access.kit();
 208     Node* control = control_dependent ? parse_access.control() : nullptr;
 209 
 210     if (immutable) {
 211       Compile* C = Compile::current();
 212       Node* mem = kit->immutable_memory();
 213       load = LoadNode::make(kit->gvn(), control, mem, adr,
 214                             adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
 215                             unaligned, mismatched, unsafe, access.barrier_data());
 216       load = kit->gvn().transform(load);
 217     } else {
 218       load = kit->make_load(control, adr, val_type, access.type(), mo,
 219                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
 220                             access.barrier_data());
 221     }
 222   } else {
 223     assert(access.is_opt_access(), "either parse or opt access");
 224     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 225     Node* control = control_dependent ? opt_access.ctl() : nullptr;
 226     MergeMemNode* mm = opt_access.mem();
 227     PhaseGVN& gvn = opt_access.gvn();
 228     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));

 856   assert(size->bottom_type()->base() == Type_X,
 857          "Should be of object size type (int for 32 bits, long for 64 bits)");
 858 
 859   // The native clone we are calling here expects the object size in words.
 860   // Add header/offset size to payload size to get object size.
 861   Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
 862   Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
 863   // HeapAccess<>::clone expects size in heap words.
 864   // For 64-bits platforms, this is a no-operation.
 865   // For 32-bits platforms, we need to multiply full_size by HeapWordsPerLong (2).
 866   Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
 867 
 868   Node* const call = phase->make_leaf_call(ctrl,
 869                                            mem,
 870                                            clone_type(),
 871                                            clone_addr,
 872                                            clone_name,
 873                                            TypeRawPtr::BOTTOM,
 874                                            src, dst, full_size_in_heap_words XTOP);
 875   phase->transform_later(call);
 876   phase->replace_node(ac, call);
 877 }
 878 
     // Expand a clone (ArrayCopyNode) into a leaf runtime call to the generic
     // arraycopy stub, copying 'length' elements of payload from 'src' to
     // 'dest' starting at the given offsets, then replace the ArrayCopyNode
     // with that call in the graph.
 879 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
     // Pull the control/memory state and copy operands off the ArrayCopyNode.
 880   Node* ctrl = ac->in(TypeFunc::Control);
 881   Node* mem = ac->in(TypeFunc::Memory);
 882   Node* src = ac->in(ArrayCopyNode::Src);
 883   Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
 884   Node* dest = ac->in(ArrayCopyNode::Dest);
 885   Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
 886   Node* length = ac->in(ArrayCopyNode::Length);
 887 
     // Address of the first payload word on each side (base + offset).
 888   Node* payload_src = phase->basic_plus_adr(src, src_offset);
 889   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
 890 
     // Resolve the T_LONG arraycopy stub (the payload is copied as raw words).
 891   const char* copyfunc_name = "arraycopy";
 892   address     copyfunc_addr = phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, copyfunc_name, true);
 893 
 894   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 895   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
 896 
     // NOTE(review): XTOP is a macro (#undef'd just below this function);
     // presumably it supplies the extra top-half argument for the long 'length'
     // on platforms that pass longs in two slots — its definition is not
     // visible in this chunk, confirm against the top of the file.
 897   Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
 898   phase->transform_later(call);
 899 
     // Splice the call into the graph in place of the ArrayCopyNode.
 900   phase->replace_node(ac, call);
 901 }
 902 
 903 #undef XTOP
 904 
     // Returns true iff the block contains a MachSafePoint node whose index
     // lies in the half-open range [from, to).
 905 static bool block_has_safepoint(const Block* block, uint from, uint to) {
 906   for (uint i = from; i < to; i++) {
 907     if (block->get_node(i)->is_MachSafePoint()) {
 908       // Safepoint found
 909       return true;
 910     }
 911   }
 912 
 913   // Safepoint not found
 914   return false;
 915 }
 916 
     // Convenience overload: scan every node of the block.
 917 static bool block_has_safepoint(const Block* block) {
 918   return block_has_safepoint(block, 0, block->number_of_nodes());
 919 }
 920 
< prev index next >