
src/hotspot/share/gc/shared/c2/barrierSetC2.cpp


New version of the changed hunks (uses C2_UNKNOWN_CONTROL_LOAD / LoadNode::UnknownControl):

 115       st->set_mismatched_access();
 116     }
 117     store = gvn.transform(st);
 118     if (store == st) {
 119       mm->set_memory_at(alias, st);
 120     }
 121   }
 122   return store;
 123 }
 124 
 125 Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 126   DecoratorSet decorators = access.decorators();
 127 
 128   Node* adr = access.addr().node();
 129   const TypePtr* adr_type = access.addr().type();
 130 
 131   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 132   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 133   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 134   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 135   bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
 136   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 137 
 138   bool in_native = (decorators & IN_NATIVE) != 0;
 139 
 140   MemNode::MemOrd mo = access.mem_node_mo();
 141   LoadNode::ControlDependency dep = unknown_control ? LoadNode::UnknownControl : LoadNode::DependsOnlyOnTest;
 142 
 143   Node* load;
 144   if (access.is_parse_access()) {
 145     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 146     GraphKit* kit = parse_access.kit();
 147     Node* control = control_dependent ? kit->control() : NULL;
 148 
 149     if (in_native) {
 150       load = kit->make_load(control, adr, val_type, access.type(), mo);
 151     } else {
 152       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
 153                             dep, requires_atomic_access, unaligned, mismatched, unsafe);
 154     }
 155     access.set_raw_access(load);
 156   } else {
 157     assert(!requires_atomic_access, "not yet supported");
 158     assert(access.is_opt_access(), "either parse or opt access");
 159     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 160     Node* control = control_dependent ? opt_access.ctl() : NULL;
 161     MergeMemNode* mm = opt_access.mem();
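
The flag decoding in load_at_resolved() above is plain bit-mask arithmetic, but two of the tests read backwards at first glance: requires_atomic_access (line 132) is true whenever the MO_UNORDERED bit is absent, i.e. for any access stronger than unordered, and the load only gets the relaxed DependsOnlyOnTest dependency (line 141) when C2_UNKNOWN_CONTROL_LOAD is not set. Below is a minimal standalone sketch of those two tests; the decorator constants and the simplified ControlDependency enum are invented placeholders for illustration, not the real HotSpot definitions.

// Standalone sketch, not JDK code: all constants below are invented.
#include <cstdint>
#include <cassert>

typedef uint64_t DecoratorSet;
const DecoratorSet MO_UNORDERED            = 1u << 0;
const DecoratorSet MO_RELAXED              = 1u << 1;
const DecoratorSet C2_UNKNOWN_CONTROL_LOAD = 1u << 2;

enum ControlDependency { UnknownControl, DependsOnlyOnTest };

int main() {
  // Plain unordered access: MO_UNORDERED is set, so no atomicity is required
  // and the load keeps the default DependsOnlyOnTest dependency.
  DecoratorSet d1 = MO_UNORDERED;
  bool atomic1 = (d1 & MO_UNORDERED) == 0;                    // false
  ControlDependency dep1 =
      (d1 & C2_UNKNOWN_CONTROL_LOAD) != 0 ? UnknownControl : DependsOnlyOnTest;
  assert(!atomic1 && dep1 == DependsOnlyOnTest);

  // Relaxed unsafe load with unknown control: MO_UNORDERED is absent, so the
  // access must be atomic, and the load has to keep its control input.
  DecoratorSet d2 = MO_RELAXED | C2_UNKNOWN_CONTROL_LOAD;
  bool atomic2 = (d2 & MO_UNORDERED) == 0;                    // true
  ControlDependency dep2 =
      (d2 & C2_UNKNOWN_CONTROL_LOAD) != 0 ? UnknownControl : DependsOnlyOnTest;
  assert(atomic2 && dep2 == UnknownControl);
  return 0;
}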


 332 
 333 void C2Access::fixup_decorators() {
 334   bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
 335   bool is_unordered = (_decorators & MO_UNORDERED) != 0 || default_mo;
 336   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 337 
 338   bool is_read = (_decorators & C2_READ_ACCESS) != 0;
 339   bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
 340 
 341   if (AlwaysAtomicAccesses && is_unordered) {
 342     _decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
 343     _decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccess
 344   }
 345 
 346   _decorators = AccessInternal::decorator_fixup(_decorators);
 347 
 348   if (is_read && !is_write && anonymous) {
 349     // To be valid, unsafe loads may depend on other conditions than
 350     // the one that guards them: pin the Load node
 351     _decorators |= C2_CONTROL_DEPENDENT_LOAD;
 352     _decorators |= C2_UNKNOWN_CONTROL_LOAD;
 353     const TypePtr* adr_type = _addr.type();
 354     Node* adr = _addr.node();
 355     if (!needs_cpu_membar() && adr_type->isa_instptr()) {
 356       assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
 357       intptr_t offset = Type::OffsetBot;
 358       AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
 359       if (offset >= 0) {
 360         int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
 361         if (offset < s) {
 362           // Guaranteed to be a valid access, no need to pin it
 363           _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
 364           _decorators ^= C2_UNKNOWN_CONTROL_LOAD;
 365         }
 366       }
 367     }
 368   }
 369 }
 370 
 371 //--------------------------- atomic operations---------------------------------
 372 
 373 void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
 374   if (!access.needs_pinning()) {
 375     return;
 376   }
 377   // SCMemProjNodes represent the memory state of a LoadStore. Their
 378   // main role is to prevent LoadStore nodes from being optimized away
 379   // when their results aren't used.
 380   assert(access.is_parse_access(), "entry not supported at optimization time");
 381   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 382   GraphKit* kit = parse_access.kit();
 383   Node* load_store = access.raw_access();
 384   assert(load_store != NULL, "must pin atomic op");
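
In fixup_decorators() above, unsafe anonymous loads are first pinned unconditionally (lines 351-352), and the pinning is later undone with "^=" rather than "&= ~" once the offset is proven to lie inside the object (lines 363-364). XOR toggles a bit, so it only acts as "clear" when the bit is known to be set, which holds here because the same flags were set a few lines earlier. Below is a minimal standalone sketch of that pattern; the mask values are invented for illustration and are not the real DecoratorSet definitions.

// Standalone sketch, not JDK code: the flag values below are invented.
#include <cstdint>
#include <cassert>

typedef uint64_t DecoratorSet;
const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = 1u << 0;
const DecoratorSet C2_UNKNOWN_CONTROL_LOAD   = 1u << 1;

int main() {
  DecoratorSet decorators = 0;

  // Conservatively pin the unsafe load first ...
  decorators |= C2_CONTROL_DEPENDENT_LOAD;
  decorators |= C2_UNKNOWN_CONTROL_LOAD;

  // ... then, once the access is proven to be within the object, undo it.
  // Because both bits are known to be set, XOR here is equivalent to
  // clearing them with "&= ~mask".
  decorators ^= C2_CONTROL_DEPENDENT_LOAD;
  decorators ^= C2_UNKNOWN_CONTROL_LOAD;

  assert(decorators == 0);
  return 0;
}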

Old version of the same hunks (uses C2_PINNED_LOAD / LoadNode::Pinned):

 115       st->set_mismatched_access();
 116     }
 117     store = gvn.transform(st);
 118     if (store == st) {
 119       mm->set_memory_at(alias, st);
 120     }
 121   }
 122   return store;
 123 }
 124 
 125 Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 126   DecoratorSet decorators = access.decorators();
 127 
 128   Node* adr = access.addr().node();
 129   const TypePtr* adr_type = access.addr().type();
 130 
 131   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 132   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 133   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 134   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 135   bool pinned = (decorators & C2_PINNED_LOAD) != 0;
 136   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 137 
 138   bool in_native = (decorators & IN_NATIVE) != 0;
 139 
 140   MemNode::MemOrd mo = access.mem_node_mo();
 141   LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
 142 
 143   Node* load;
 144   if (access.is_parse_access()) {
 145     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 146     GraphKit* kit = parse_access.kit();
 147     Node* control = control_dependent ? kit->control() : NULL;
 148 
 149     if (in_native) {
 150       load = kit->make_load(control, adr, val_type, access.type(), mo);
 151     } else {
 152       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
 153                             dep, requires_atomic_access, unaligned, mismatched, unsafe);
 154     }
 155     access.set_raw_access(load);
 156   } else {
 157     assert(!requires_atomic_access, "not yet supported");
 158     assert(access.is_opt_access(), "either parse or opt access");
 159     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 160     Node* control = control_dependent ? opt_access.ctl() : NULL;
 161     MergeMemNode* mm = opt_access.mem();


 332 
 333 void C2Access::fixup_decorators() {
 334   bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
 335   bool is_unordered = (_decorators & MO_UNORDERED) != 0 || default_mo;
 336   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
 337 
 338   bool is_read = (_decorators & C2_READ_ACCESS) != 0;
 339   bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
 340 
 341   if (AlwaysAtomicAccesses && is_unordered) {
 342     _decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
 343     _decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccess
 344   }
 345 
 346   _decorators = AccessInternal::decorator_fixup(_decorators);
 347 
 348   if (is_read && !is_write && anonymous) {
 349     // To be valid, unsafe loads may depend on other conditions than
 350     // the one that guards them: pin the Load node
 351     _decorators |= C2_CONTROL_DEPENDENT_LOAD;
 352     _decorators |= C2_PINNED_LOAD;
 353     const TypePtr* adr_type = _addr.type();
 354     Node* adr = _addr.node();
 355     if (!needs_cpu_membar() && adr_type->isa_instptr()) {
 356       assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
 357       intptr_t offset = Type::OffsetBot;
 358       AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
 359       if (offset >= 0) {
 360         int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
 361         if (offset < s) {
 362           // Guaranteed to be a valid access, no need to pin it
 363           _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
 364           _decorators ^= C2_PINNED_LOAD;
 365         }
 366       }
 367     }
 368   }
 369 }
 370 
 371 //--------------------------- atomic operations---------------------------------
 372 
 373 void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
 374   if (!access.needs_pinning()) {
 375     return;
 376   }
 377   // SCMemProjNodes represent the memory state of a LoadStore. Their
 378   // main role is to prevent LoadStore nodes from being optimized away
 379   // when their results aren't used.
 380   assert(access.is_parse_access(), "entry not supported at optimization time");
 381   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 382   GraphKit* kit = parse_access.kit();
 383   Node* load_store = access.raw_access();
 384   assert(load_store != NULL, "must pin atomic op");

