/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#endif // COMPILER2

#define __ masm->

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count, RegSet saved_regs) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  if (!dest_uninitialized) {
    Label done;
    Address in_progress(rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));

    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ ldrw(rscratch1, in_progress);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ ldrb(rscratch1, in_progress);
    }
    __ cbzw(rscratch1, done);

    __ push(saved_regs, sp);
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
        __ mov(rscratch1, c_rarg0);
        __ mov(c_rarg0, c_rarg1);
        __ mov(c_rarg1, rscratch1);
      } else {
        __ mov(c_rarg1, count);
        __ mov(c_rarg0, addr);
      }
    } else {
      __ mov(c_rarg0, addr);
      __ mov(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
    __ pop(saved_regs, sp);

    __ bind(done);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm,
                                                             DecoratorSet decorators,
                                                             Register start,
                                                             Register count,
                                                             Register scratch) {

  Label done;
  Label loop;
  Label next;

  __ cbz(count, done);

  // Calculate the number of card marks to set. Since the object might start and
  // end within a card, we need to calculate this via the card table indexes of
  // the actual start and last addresses covered by the object.
  // Temporarily use the count register for the last element address.
  __ lea(count, Address(start, count, Address::lsl(LogBytesPerHeapOop))); // end = start + count << LogBytesPerHeapOop
  __ sub(count, count, BytesPerHeapOop);                                  // Use last element address for end.

  __ lsr(start, start, CardTable::card_shift());
  __ lsr(count, count, CardTable::card_shift());
  __ sub(count, count, start);                                            // Number of bytes to mark - 1.

  // Add card table base offset to start.
  __ ldr(scratch, Address(rthread, in_bytes(G1ThreadLocalData::card_table_base_offset())));
  __ add(start, start, scratch);

  __ bind(loop);
  if (UseCondCardMark) {
    __ ldrb(scratch, Address(start, count));
    // Instead of loading clean_card_val and comparing, we exploit the fact that
    // the LSB of non-clean cards is always 0, and the LSB of clean cards 1.
    __ tbz(scratch, 0, next);
  }
  static_assert(G1CardTable::dirty_card_val() == 0, "must be to use zr");
  __ strb(zr, Address(start, count));
  __ bind(next);
  __ subs(count, count, 1);
  __ br(Assembler::GE, loop);

  __ bind(done);
}

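// Attempt to store `value` into the thread-local queue described by the given index and
// buffer offsets; branches to `runtime` if the queue is full (index == 0).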
static void generate_queue_test_and_insertion(MacroAssembler* masm, ByteSize index_offset, ByteSize buffer_offset, Label& runtime,
                                              const Register thread, const Register value, const Register temp1, const Register temp2) {
  assert_different_registers(value, temp1, temp2);
  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ ldr(temp1, Address(thread, in_bytes(index_offset)));   // temp1 := *(index address)
  __ cbz(temp1, runtime);                                   // jump to runtime if index == 0 (full buffer)
  // The buffer is not full, store value into it.
  __ sub(temp1, temp1, wordSize);                           // temp1 := next index
  __ str(temp1, Address(thread, in_bytes(index_offset)));   // *(index address) := next index
  __ ldr(temp2, Address(thread, in_bytes(buffer_offset)));  // temp2 := buffer address
  __ str(value, Address(temp2, temp1));                     // *(buffer address + next index) := value
}

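// Load the SATB mark-queue active flag for the given thread into tmp1; callers branch on it.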
static void generate_pre_barrier_fast_path(MacroAssembler* masm,
                                           const Register thread,
                                           const Register tmp1) {
  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp1, in_progress);
  }
}

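// Pre-barrier slow path: load the previous value if `obj` is provided, skip null values,
// and try to enqueue the previous value into the SATB queue, branching to `runtime` if
// the queue is full.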
static void generate_pre_barrier_slow_path(MacroAssembler* masm,
                                           const Register obj,
                                           const Register pre_val,
                                           const Register thread,
                                           const Register tmp1,
                                           const Register tmp2,
                                           Label& done,
                                           Label& runtime) {
  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }
  // Is the previous value null?
  __ cbz(pre_val, done);
  generate_queue_test_and_insertion(masm,
                                    G1ThreadLocalData::satb_mark_queue_index_offset(),
                                    G1ThreadLocalData::satb_mark_queue_buffer_offset(),
                                    runtime,
                                    thread, pre_val, tmp1, tmp2);
  __ b(done);
}

void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp1,
                                                 Register tmp2,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly, skipping the _last_sp check that
  // InterpreterMacroAssembler::call_VM_leaf_base would otherwise generate.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is not active (*(mark queue active address) == 0), jump to done
  __ cbzw(tmp1, done);
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);

  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  FloatRegSet fsaved;

  // Barriers might be emitted when converting between (scalarized) calling
  // conventions for inline types. Save all argument registers before calling
  // into the runtime.

  // TODO 8366717: This came with 8284161 (Implementation of Virtual Threads (Preview)) in May 2022.
  // Check whether push_call_clobbered_registers() would be sufficient here.
  //__ push_call_clobbered_registers();
  assert_different_registers(rscratch1, pre_val); // push_CPU_state trashes rscratch1
  __ push_CPU_state(true);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_CPU_state(true);

  __ bind(done);

}

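// Common post-barrier code: skip the card mark if the store does not cross heap regions
// or (if new_val_may_be_null) stores null; otherwise dirty the card for the store address.
// With UseCondCardMark the write is skipped if the card is already non-clean.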
static void generate_post_barrier(MacroAssembler* masm,
                                  const Register store_addr,
                                  const Register new_val,
                                  const Register thread,
                                  const Register tmp1,
                                  const Register tmp2,
                                  Label& done,
                                  bool new_val_may_be_null) {
  assert(thread == rthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg, rscratch1);

  // Does store cross heap regions?
  __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
  __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
  __ cbz(tmp1, done);
  // Crosses regions, storing null?
  if (new_val_may_be_null) {
    __ cbz(new_val, done);
  }
  // Storing region crossing non-null.
  __ lsr(tmp1, store_addr, CardTable::card_shift());     // tmp1 := card address relative to card table base

  Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
  __ ldr(tmp2, card_table_addr);                         // tmp2 := card table base address
  if (UseCondCardMark) {
    __ ldrb(rscratch1, Address(tmp1, tmp2));             // rscratch1 := card
    // Instead of loading clean_card_val and comparing, we exploit the fact that
    // the LSB of non-clean cards is always 0, and the LSB of clean cards 1.
    __ tbz(rscratch1, 0, done);
  }
  static_assert(G1CardTable::dirty_card_val() == 0, "must be to use zr");
  __ strb(zr, Address(tmp1, tmp2));                      // *(card address) := dirty_card_val
}

void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp1,
                                                  Register tmp2) {
  Label done;
  generate_post_barrier(masm, store_addr, new_val, thread, tmp1, tmp2, done, false /* new_val_may_be_null */);
  __ bind(done);
}

#if defined(COMPILER2)

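// Call the given barrier runtime entry from a C2 stub: live registers are saved around
// the call, `arg` is passed in c_rarg0 and the current thread in c_rarg1.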
static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
  SaveLiveRegisters save_registers(masm, stub);
  if (c_rarg0 != arg) {
    __ mov(c_rarg0, arg);
  }
  __ mov(c_rarg1, rthread);
  __ mov(rscratch1, runtime_path);
  __ blr(rscratch1);
}

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
                                                    Register obj,
                                                    Register pre_val,
                                                    Register thread,
                                                    Register tmp1,
                                                    Register tmp2,
                                                    G1PreBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
  __ cbnzw(tmp1, *stub->entry());

  __ bind(*stub->continuation());
}

void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
                                                         G1PreBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Label runtime;
  Register obj = stub->obj();
  Register pre_val = stub->pre_val();
  Register thread = stub->thread();
  Register tmp1 = stub->tmp1();
  Register tmp2 = stub->tmp2();

  __ bind(*stub->entry());
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, *stub->continuation(), runtime);

  __ bind(runtime);
  generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
  __ b(*stub->continuation());
}

void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
                                                     Register store_addr,
                                                     Register new_val,
                                                     Register thread,
                                                     Register tmp1,
                                                     Register tmp2,
                                                     bool new_val_may_be_null) {
  Label done;
  generate_post_barrier(masm, store_addr, new_val, thread, tmp1, tmp2, done, new_val_may_be_null);
  __ bind(done);
}

#endif // COMPILER2

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp2) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  CardTableBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  if (on_oop && on_reference) {
    // LR is live.  It must be saved around calls.
    __ enter(/*strip_ret_addr*/true); // barrier may call runtime
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         rthread /* thread */,
                         tmp1 /* tmp1 */,
                         tmp2 /* tmp2 */,
                         true /* tosca_live */,
                         true /* expand_call */);
    __ leave();
  }
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  bool needs_pre_barrier = as_normal && !dest_uninitialized;
  bool needs_post_barrier = (val != noreg && in_heap);

  assert_different_registers(val, tmp1, tmp2, tmp3);

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm,
                         tmp3 /* obj */,
                         tmp2 /* pre_val */,
                         rthread /* thread */,
                         tmp1  /* tmp1 */,
                         rscratch2  /* tmp2 */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (needs_post_barrier) {
      if (UseCompressedOops) {
        new_val = rscratch2;
        __ mov(new_val, val);
      }
    }

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm,
                            tmp3 /* store_addr */,
                            new_val /* new_val */,
                            rthread /* thread */,
                            tmp1 /* tmp1 */,
                            tmp2 /* tmp2 */);
    }
  }

}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

#undef __

void G1BarrierSetAssembler::g1_write_barrier_post_c1(MacroAssembler* masm,
                                                     Register store_addr,
                                                     Register new_val,
                                                     Register thread,
                                                     Register tmp1,
                                                     Register tmp2) {
  Label done;
  generate_post_barrier(masm, store_addr, new_val, thread, tmp1, tmp2, done, true /* new_val_may_be_null */);
  masm->bind(done);
}

#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);            // tmp := queue index
  __ cbz(tmp, runtime);                // index == 0 means the buffer is full

  __ sub(tmp, tmp, wordSize);          // tmp := next index
  __ str(tmp, queue_index);            // update the queue index
  __ ldr(rscratch2, buffer);           // rscratch2 := buffer base address
  __ add(tmp, tmp, rscratch2);         // tmp := address of the new queue entry
  __ load_parameter(0, rscratch2);     // rscratch2 := previous value (stub argument 0)
  __ str(rscratch2, Address(tmp, 0));  // store the previous value into the SATB buffer
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

#undef __

#endif // COMPILER1