 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#endif // COMPILER2

#define __ masm->
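// The "__" shorthand defined above expands to "masm->", so the emitter calls
// below read like an AArch64 assembly listing.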
// ...

    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_call_clobbered_registers();

  __ bind(done);
}

static void generate_post_barrier_fast_path(MacroAssembler* masm,
                                            const Register store_addr,
                                            const Register new_val,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            bool new_val_may_be_null) {
  // Does store cross heap regions?
  __ eor(tmp1, store_addr, new_val);                      // tmp1 := store address ^ new value
  __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);    // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
  __ cbz(tmp1, done);
  // Crosses regions, storing null?
  if (new_val_may_be_null) {
    __ cbz(new_val, done);
  }
  // Storing region crossing non-null, is card young?
  __ lsr(tmp1, store_addr, CardTable::card_shift());      // tmp1 := card address relative to card table base
  __ load_byte_map_base(tmp2);                            // tmp2 := card table base address
  __ add(tmp1, tmp1, tmp2);                               // tmp1 := card address
  __ ldrb(tmp2, Address(tmp1));                           // tmp2 := card
  __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());   // tmp2 := card == young_card_val?
}
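// For reference, a rough C-like sketch of the fast-path test emitted above
// ("byte_map_base" is a descriptive placeholder for the value produced by
// load_byte_map_base(), not an actual accessor):
//
//   if ((((uintptr_t)store_addr ^ (uintptr_t)new_val) >> G1HeapRegion::LogOfHRGrainBytes) == 0)
//     goto done;                                   // both ends in the same region
//   if (new_val == nullptr)                        // only emitted when new_val_may_be_null
//     goto done;
//   uint8_t* card = byte_map_base + ((uintptr_t)store_addr >> CardTable::card_shift());
//   // falls through with the condition flags holding: *card == G1CardTable::g1_young_card_val()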

static void generate_post_barrier_slow_path(MacroAssembler* masm,
                                            const Register thread,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            Label& runtime) {
  __ membar(Assembler::StoreLoad);  // StoreLoad membar
  __ ldrb(tmp2, Address(tmp1));     // tmp2 := card
  __ cbzw(tmp2, done);
  // Storing a region crossing, non-null oop, card is clean.
  // Dirty card and log.
  STATIC_ASSERT(CardTable::dirty_card_val() == 0);
  __ strb(zr, Address(tmp1));       // *(card address) := dirty_card_val
  generate_queue_test_and_insertion(masm,
// ...

  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(store_addr);
  __ push(saved, sp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
  __ pop(saved, sp);

  __ bind(done);
}
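// Note on the epilogue above: at the runtime call site tmp1 still holds the card
// address computed by the fast path, so it is passed straight to
// G1BarrierSetRuntime::write_ref_field_post_entry together with the current
// thread; store_addr is the only input value still live across the call, hence
// the single push/pop around it.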

#if defined(COMPILER2)

static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
  SaveLiveRegisters save_registers(masm, stub);
  if (c_rarg0 != arg) {
    __ mov(c_rarg0, arg);
  }
  __ mov(c_rarg1, rthread);
  __ mov(rscratch1, runtime_path);
  __ blr(rscratch1);
}
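// The helper above follows the AArch64 C calling convention: the barrier
// argument is moved into c_rarg0 (unless it is already there), the current
// thread into c_rarg1, and the call target is materialized in rscratch1 for the
// indirect blr.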

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
                                                    Register obj,
                                                    Register pre_val,
                                                    Register thread,
                                                    Register tmp1,
                                                    Register tmp2,
                                                    G1PreBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
  __ cbnzw(tmp1, *stub->entry());

 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#if INCLUDE_CDS
#include "code/SCCache.hpp"
#endif // INCLUDE_CDS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#endif // COMPILER2

#define __ masm->
// ...

    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_call_clobbered_registers();

  __ bind(done);
}

static void generate_post_barrier_fast_path(MacroAssembler* masm,
                                            const Register store_addr,
                                            const Register new_val,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            bool new_val_may_be_null) {
  // Does store cross heap regions?
#if INCLUDE_CDS
  // AOT code needs to load the barrier grain shift from the AOT runtime
  // constants area in the code cache; otherwise we can compile it as an
  // immediate operand.
  if (SCCache::is_on_for_write()) {
    address grain_shift_address = (address)AOTRuntimeConstants::grain_shift_address();
    __ eor(tmp1, store_addr, new_val);                    // tmp1 := store address ^ new value
    __ lea(tmp2, ExternalAddress(grain_shift_address));   // tmp2 := address of the grain shift constant
    __ ldrb(tmp2, tmp2);                                  // tmp2 := grain shift loaded from the constants area
    __ lsrv(tmp1, tmp1, tmp2);                            // tmp1 := (store address ^ new value) >> grain shift
    __ cbz(tmp1, done);
  } else
#endif // INCLUDE_CDS
  {
    __ eor(tmp1, store_addr, new_val);                    // tmp1 := store address ^ new value
    __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);  // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
    __ cbz(tmp1, done);
  }

  // Crosses regions, storing null?
  if (new_val_may_be_null) {
    __ cbz(new_val, done);
  }
  // Storing region crossing non-null, is card young?

#if INCLUDE_CDS
  // AOT code needs to load the barrier card shift from the AOT runtime
  // constants area in the code cache; otherwise we can compile it as an
  // immediate operand.
  if (SCCache::is_on_for_write()) {
    address card_shift_address = (address)AOTRuntimeConstants::card_shift_address();
    __ lea(tmp2, ExternalAddress(card_shift_address));    // tmp2 := address of the card shift constant
    __ ldrb(tmp2, tmp2);                                  // tmp2 := card shift loaded from the constants area
    __ lsrv(tmp1, store_addr, tmp2);                      // tmp1 := card address relative to card table base
  } else
#endif // INCLUDE_CDS
  {
    __ lsr(tmp1, store_addr, CardTable::card_shift());    // tmp1 := card address relative to card table base
  }

  __ load_byte_map_base(tmp2);                            // tmp2 := card table base address
  __ add(tmp1, tmp1, tmp2);                               // tmp1 := card address
  __ ldrb(tmp2, Address(tmp1));                           // tmp2 := card
  __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());   // tmp2 := card == young_card_val?
}
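// For reference, the SCCache::is_on_for_write() branches above differ from the
// default path only in where the shift amounts come from: instead of encoding
// LogOfHRGrainBytes / CardTable::card_shift() as immediate operands (lsr #imm),
// the shifts are read from the AOT runtime constants area (lea + ldrb) and
// applied as register shifts (lsrv). Roughly (a sketch, not the actual code):
//
//   uint8_t grain_shift = *AOTRuntimeConstants::grain_shift_address();
//   same_region = ((((uintptr_t)store_addr ^ (uintptr_t)new_val) >> grain_shift) == 0);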

static void generate_post_barrier_slow_path(MacroAssembler* masm,
                                            const Register thread,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            Label& runtime) {
  __ membar(Assembler::StoreLoad);  // StoreLoad membar
  __ ldrb(tmp2, Address(tmp1));     // tmp2 := card
  __ cbzw(tmp2, done);
  // Storing a region crossing, non-null oop, card is clean.
  // Dirty card and log.
  STATIC_ASSERT(CardTable::dirty_card_val() == 0);
  __ strb(zr, Address(tmp1));       // *(card address) := dirty_card_val
  generate_queue_test_and_insertion(masm,
// ...

  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(store_addr);
  __ push(saved, sp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
  __ pop(saved, sp);

  __ bind(done);
}

#if defined(COMPILER2)

static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
  SaveLiveRegisters save_registers(masm, stub);
  if (c_rarg0 != arg) {
    __ mov(c_rarg0, arg);
  }
  __ mov(c_rarg1, rthread);
  __ lea(rscratch1, RuntimeAddress(runtime_path));
  __ blr(rscratch1);
}
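// Note: lea(rscratch1, RuntimeAddress(runtime_path)) materializes the call
// target with runtime-call relocation info attached, unlike a raw 64-bit mov of
// the address; presumably this is what allows code destined for the AOT code
// cache to have the target fixed up when the cached code is later loaded.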

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
                                                    Register obj,
                                                    Register pre_val,
                                                    Register thread,
                                                    Register tmp1,
                                                    Register tmp2,
                                                    G1PreBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
  __ cbnzw(tmp1, *stub->entry());