43 if (!G1PreBarrierStubC2::needs_barrier(node)) {
44 return;
45 }
46 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
47 G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
48 G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
49 for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
50 stub->preserve(*reg);
51 }
52 for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
53 stub->dont_preserve(*reg);
54 }
55 g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, r15_thread, tmp, stub);
56 }
57
58 static void write_barrier_post(MacroAssembler* masm,
59 const MachNode* node,
60 Register store_addr,
61 Register new_val,
62 Register tmp1,
63 Register tmp2) {
64 if (!G1PostBarrierStubC2::needs_barrier(node)) {
65 return;
66 }
67 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
68 G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
69 G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
70 g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, r15_thread, tmp1, tmp2, stub);
71 }
72
73 %}
74
// G1-barriered 64-bit oop store: emits the movq plus the G1 pre- and
// post-write barriers. Matches only StoreP nodes to which C2 attached
// non-zero barrier data (see predicate).
//   tmp1 - holds the materialized store address
//   tmp2 - pre_val scratch for the pre barrier, reused as scratch afterwards
//   tmp3 - additional scratch; cr is clobbered by the barrier code
instruct g1StoreP(memory mem, any_RegP src, rRegP tmp1, rRegP tmp2, rRegP tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  ins_encode %{
    // Materialize the store address internally (as opposed to defining 'mem' as
    // an indirect memory operand) to reduce the overhead of LCM when processing
    // large basic blocks with many stores. Such basic blocks arise, for
    // instance, from static initializations of large String arrays.
    // The same holds for g1StoreN and g1EncodePAndStoreN.
    __ lea($tmp1$$Register, $mem$$Address);
    // Pre barrier: tmp1 (the address) and src (the value to store) are still
    // needed after the barrier, so ask for them to be preserved.
    write_barrier_pre(masm, this,
                      $tmp1$$Register /* obj */,
                      $tmp2$$Register /* pre_val */,
                      $tmp3$$Register /* tmp */,
                      RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);
    // The store proper, through the address computed above.
    __ movq(Address($tmp1$$Register, 0), $src$$Register);
    // Post barrier for the location just stored to.
    write_barrier_post(masm, this,
                       $tmp1$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp3$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(ialu_mem_reg);
%}
103
104 instruct g1StoreN(memory mem, rRegN src, rRegP tmp1, rRegP tmp2, rRegP tmp3, rFlagsReg cr)
105 %{
106 predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
107 match(Set mem (StoreN mem src));
108 effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
109 ins_cost(125); // XXX
110 format %{ "movl $mem, $src\t# ptr" %}
111 ins_encode %{
112 __ lea($tmp1$$Register, $mem$$Address);
113 write_barrier_pre(masm, this,
114 $tmp1$$Register /* obj */,
115 $tmp2$$Register /* pre_val */,
116 $tmp3$$Register /* tmp */,
117 RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);
118 __ movl(Address($tmp1$$Register, 0), $src$$Register);
119 if ((barrier_data() & G1C2BarrierPost) != 0) {
120 __ movl($tmp2$$Register, $src$$Register);
121 if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
122 __ decode_heap_oop($tmp2$$Register);
123 } else {
|
43 if (!G1PreBarrierStubC2::needs_barrier(node)) {
44 return;
45 }
46 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
47 G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
48 G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
49 for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
50 stub->preserve(*reg);
51 }
52 for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
53 stub->dont_preserve(*reg);
54 }
55 g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, r15_thread, tmp, stub);
56 }
57
// Emits the G1 post-write barrier for the store represented by 'node', if the
// node's barrier data requests one (G1PostBarrierStubC2::needs_barrier).
//   store_addr - register holding the address just stored to
//   new_val    - register holding the value that was stored
//   tmp1/tmp2  - scratch registers for the barrier code
//   preserve   - registers that must survive the out-of-line stub
//                (empty by default)
static void write_barrier_post(MacroAssembler* masm,
                               const MachNode* node,
                               Register store_addr,
                               Register new_val,
                               Register tmp1,
                               Register tmp2,
                               RegSet preserve = RegSet()) {
  if (!G1PostBarrierStubC2::needs_barrier(node)) {
    // C2 determined no post barrier is needed for this store.
    return;
  }
  // NOTE(review): presumably excludes the barrier instructions from the
  // node's inline instruction count — confirm against Assembler.
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
  // Register each requested register on the stub; iteration ends at the
  // noreg sentinel.
  for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
    stub->preserve(*reg);
  }
  g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, r15_thread, tmp1, tmp2, stub);
}
76
77 %}
78
// G1-barriered 64-bit oop store. Matches only StoreP nodes that carry
// non-zero barrier data. tmp1 receives the materialized store address;
// tmp2/tmp3 serve as barrier scratch registers; cr is clobbered.
instruct g1StoreP(memory mem, any_RegP src, rRegP tmp1, rRegP tmp2, rRegP tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  ins_encode %{
    // Materialize the store address internally (as opposed to defining 'mem' as
    // an indirect memory operand) to reduce the overhead of LCM when processing
    // large basic blocks with many stores. Such basic blocks arise, for
    // instance, from static initializations of large String arrays.
    // The same holds for g1StoreN and g1EncodePAndStoreN.
    __ lea($tmp1$$Register, $mem$$Address);
    // Pre barrier; tmp1 and src are needed again below, so preserve them
    // across the barrier's out-of-line stub.
    write_barrier_pre(masm, this,
                      $tmp1$$Register /* obj */,
                      $tmp2$$Register /* pre_val */,
                      $tmp3$$Register /* tmp */,
                      RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);
    // The store proper, through the materialized address.
    __ movq(Address($tmp1$$Register, 0), $src$$Register);
    // Post barrier for the location just stored to.
    write_barrier_post(masm, this,
                       $tmp1$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp3$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(ialu_mem_reg);
%}
107
// TODO 8350865 (same applies to g1StoreLSpecialTwoOops)
// - Can we use an unbound register for src?
// - Do no set/overwrite barrier data here, also handle G1C2BarrierPostNotNull
// - Is the zero-extend really required in all the places?
//
// Stores a long whose bits embed a single narrow oop at byte offset 'off',
// emitting G1 pre/post barriers for that narrow-oop field. src is bound to
// rdx and destroyed (USE_KILL); tmp1 holds the field address.
instruct g1StoreLSpecialOneOop(memory mem, rdx_RegL src, immI off, rRegP tmp1, rRegP tmp2, rRegP tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem (Binary src off)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, USE_KILL src, KILL cr);
  format %{ "movq $mem, $src\t# g1StoreLSpecialOneOop" %}
  ins_encode %{
    // Unconditionally request both barriers for the embedded oop field
    // (see TODO above about not overwriting barrier data here).
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);

    __ lea($tmp1$$Register, $mem$$Address);
    // Adjust address to point to narrow oop
    __ addq($tmp1$$Register, $off$$constant);
    // Pre barrier on the narrow-oop field; tmp1 and src are needed below,
    // so preserve them across the barrier stub.
    write_barrier_pre(masm, this,
                      $tmp1$$Register /* obj */,
                      $tmp2$$Register /* pre_val */,
                      $tmp3$$Register /* tmp */,
                      RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);

    // Store the whole long (the narrow-oop field is part of it).
    __ movq($mem$$Address, $src$$Register);

    // Shift long value to extract the narrow oop field value and zero-extend it
    // ('off' bytes -> off*8 bits; the 32-bit movl clears the upper half).
    __ shrq($src$$Register, $off$$constant << LogBitsPerByte);
    __ movl($src$$Register, $src$$Register);

    // Post barrier for the narrow-oop field only.
    write_barrier_post(masm, this,
                       $tmp1$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp3$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(ialu_mem_reg);
%}
144
// Stores a long whose two 32-bit halves each embed a narrow oop, emitting G1
// pre barriers for both fields before the store and post barriers for both
// afterwards. src is bound to rdx and destroyed (USE_KILL); tmp1 holds the
// current field address, tmp4 holds the first (low-half) narrow oop.
instruct g1StoreLSpecialTwoOops(memory mem, rdx_RegL src, rRegP tmp1, rRegP tmp2, rRegP tmp3, rRegP tmp4, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, KILL cr);
  format %{ "movq $mem, $src\t# g1StoreLSpecialTwoOops" %}
  ins_encode %{
    // Unconditionally request both barriers (see TODO 8350865 on
    // g1StoreLSpecialOneOop about not overwriting barrier data here).
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);

    __ lea($tmp1$$Register, $mem$$Address);
    // Pre barrier for the first narrow oop (at offset 0).
    write_barrier_pre(masm, this,
                      $tmp1$$Register /* obj */,
                      $tmp2$$Register /* pre_val */,
                      $tmp3$$Register /* tmp */,
                      RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);
    // Adjust address to point to the second narrow oop in the long value
    __ addq($tmp1$$Register, 4);
    // Pre barrier for the second narrow oop.
    write_barrier_pre(masm, this,
                      $tmp1$$Register /* obj */,
                      $tmp2$$Register /* pre_val */,
                      $tmp3$$Register /* tmp */,
                      RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);

    // Store both halves with a single 64-bit store.
    __ movq($mem$$Address, $src$$Register);

    // Zero-extend first narrow oop to long
    __ movl($tmp4$$Register, $src$$Register);

    // Shift long value to extract the second narrow oop field value
    __ shrq($src$$Register, 32);

    // Post barrier for the second narrow oop (tmp1 still points at it).
    // tmp1 and tmp4 are needed for the first oop's post barrier below, so
    // they must survive this barrier's stub.
    write_barrier_post(masm, this,
                       $tmp1$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp3$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */,
                       RegSet::of($tmp1$$Register, $tmp4$$Register) /* preserve */);
    // Adjust address again to point to the first narrow oop in the long value
    __ subq($tmp1$$Register, 4);
    // Post barrier for the first narrow oop.
    write_barrier_post(masm, this,
                       $tmp1$$Register /* store_addr */,
                       $tmp4$$Register /* new_val */,
                       $tmp3$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(ialu_mem_reg);
%}
192
193 instruct g1StoreN(memory mem, rRegN src, rRegP tmp1, rRegP tmp2, rRegP tmp3, rFlagsReg cr)
194 %{
195 predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
196 match(Set mem (StoreN mem src));
197 effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
198 ins_cost(125); // XXX
199 format %{ "movl $mem, $src\t# ptr" %}
200 ins_encode %{
201 __ lea($tmp1$$Register, $mem$$Address);
202 write_barrier_pre(masm, this,
203 $tmp1$$Register /* obj */,
204 $tmp2$$Register /* pre_val */,
205 $tmp3$$Register /* tmp */,
206 RegSet::of($tmp1$$Register, $src$$Register) /* preserve */);
207 __ movl(Address($tmp1$$Register, 0), $src$$Register);
208 if ((barrier_data() & G1C2BarrierPost) != 0) {
209 __ movl($tmp2$$Register, $src$$Register);
210 if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
211 __ decode_heap_oop($tmp2$$Register);
212 } else {
|