  return _node->barrier_data();
}

void BarrierStubC2::preserve(Register r) {
  const VMReg vm_reg = r->as_VMReg();
  assert(vm_reg->is_Register(), "r must be a general-purpose register");
  _preserve.insert(OptoReg::as_OptoReg(vm_reg));
}

void BarrierStubC2::dont_preserve(Register r) {
  VMReg vm_reg = r->as_VMReg();
  assert(vm_reg->is_Register(), "r must be a general-purpose register");
  // Subtract the given register and all its sub-registers (e.g. {R11, R11_H}
  // for r11 in aarch64).
  do {
    _preserve.remove(OptoReg::as_OptoReg(vm_reg));
    vm_reg = vm_reg->next();
  } while (vm_reg->is_Register() && !vm_reg->is_concrete());
}
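
// Note: C2 models a 64-bit register as a pair of adjacent OptoRegs (e.g.
// R11 and R11_H on aarch64), so a single Register can map to several mask
// bits; the loop above clears all of them.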

bool BarrierStubC2::is_preserved(Register r) {
  const VMReg vm_reg = r->as_VMReg();
  assert(vm_reg->is_Register(), "r must be a general-purpose register");
  return _preserve.member(OptoReg::as_OptoReg(vm_reg));
}

const RegMask& BarrierStubC2::preserve_set() const {
  return _preserve;
}
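
// Usage sketch (illustrative only; the stub and register names below are
// hypothetical): a platform backend can use this API to control which
// registers survive a stub's slow-path call, e.g.
//
//   stub->preserve(robj);            // robj must survive the runtime call
//   stub->dont_preserve(rscratch1);  // rscratch1 may be clobbered freely
//   if (stub->is_preserved(robj)) {
//     // spill robj before the call and reload it afterwards
//   }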

Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
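  // Any ordering stronger than MO_UNORDERED (i.e. the MO_UNORDERED bit is
  // clear) requires the store to be emitted as an atomic access.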
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;

  MemNode::MemOrd mo = access.mem_node_mo();

  Node* store;
  BasicType bt = access.type();
  if (access.is_parse_access()) {
    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);

    GraphKit* kit = parse_access.kit();

// [... intervening code elided ...]
            !is_concrete(access_offset) ||
            !is_concrete(mem_offset)) {
          // No information available
          continue;
        }

        if (mem_obj != access_obj || mem_offset != access_offset) {
          // Not the same addresses, not a candidate
          continue;
        }
        assert(is_concrete(access_offset) && access_offset >= 0,
               "candidate non-allocation-dominated access offsets must be concrete and nonnegative");
      }
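
      // A dominating access to the same address only makes this barrier
      // redundant if no safepoint can be reached in between: at a safepoint
      // the GC may change phase or move objects, invalidating whatever the
      // dominating barrier established.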
      Block* mem_block = cfg->get_block_for_node(mem);
      const uint mem_index = block_index(mem_block, mem);

      if (access_block == mem_block) {
        // Earlier accesses in the same block
        if (mem_index < access_index && !block_has_safepoint(mem_block, mem_index + 1, access_index)) {
          elide_dominated_barrier(access, mem->is_Mach() ? mem->as_Mach() : nullptr);
        }
      } else if (mem_block->dominates(access_block)) {
        // Dominating block? Look around for safepoints
        ResourceMark rm;
        Block_List stack;
        VectorSet visited;
        stack.push(access_block);
        bool safepoint_found = block_has_safepoint(access_block);
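        // Walk the CFG backwards from the access block towards the
        // dominating block, across all predecessor paths, looking for a
        // safepoint. Reaching mem_block terminates the search on that path.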
        while (!safepoint_found && stack.size() > 0) {
          const Block* const block = stack.pop();
          if (visited.test_set(block->_pre_order)) {
            continue;
          }
          if (block_has_safepoint(block)) {
            safepoint_found = true;
            break;
          }
          if (block == mem_block) {
            continue;
          }

          // Push predecessor blocks
          for (uint p = 1; p < block->num_preds(); ++p) {
            Block* const pred = cfg->get_block_for_node(block->pred(p));
            stack.push(pred);
          }
        }

        if (!safepoint_found) {
          elide_dominated_barrier(access, mem->is_Mach() ? mem->as_Mach() : nullptr);
        }
      }
    }
  }
}
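
// In summary, the barrier on an access is elided when some dominating
// access touches the same (base, offset) address and no safepoint can
// intervene: either earlier in the same block with no safepoint between
// the two, or in a strictly dominating block with no safepoint on any
// predecessor path in between.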
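
// Compute the set of live registers at each barrier stub, using a standard
// backwards liveness dataflow pass over the CFG: a block's live-out is the
// union of its successors' live-in sets, defs are subtracted and uses added
// while walking each block bottom-up, and a worklist drives the iteration
// to a fixpoint.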
void BarrierSetC2::compute_liveness_at_stubs() const {
  ResourceMark rm;
  Compile* const C = Compile::current();
  Arena* const A = Thread::current()->resource_area();
  PhaseCFG* const cfg = C->cfg();
  PhaseRegAlloc* const regalloc = C->regalloc();
  RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
  BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
  BarrierSetC2State* bs_state = barrier_set_state();
  Block_List worklist;
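
  // Start every block with an empty live set and seed the worklist with
  // all blocks; the backwards dataflow pass then iterates to a fixpoint.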
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    new ((void*)(live + i)) RegMask();
    worklist.push(cfg->get_block(i));