/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

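// Pre-barrier for oop array copies. Unless the destination is known to be
// uninitialized, and only while SATB marking is active, the destination range
// (addr, count) is handed to the G1 pre-barrier runtime entry so that the
// about-to-be-overwritten elements are recorded in the SATB queues.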
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (!dest_uninitialized) {
    Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
    __ push(thread);
    __ get_thread(thread);
#endif

    Label filtered;
    Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ cmpl(in_progress, 0);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ cmpb(in_progress, 0);
    }

    NOT_LP64(__ pop(thread);)

    __ jcc(Assembler::equal, filtered);

    __ push_call_clobbered_registers(false /* save_fpu */);
#ifdef _LP64
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
        __ xchgptr(c_rarg1, c_rarg0);
      } else {
        __ movptr(c_rarg1, count);
        __ movptr(c_rarg0, addr);
      }
    } else {
      __ movptr(c_rarg0, addr);
      __ movptr(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
#else
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry),
                    addr, count);
#endif
    __ pop_call_clobbered_registers(false /* save_fpu */);

    __ bind(filtered);
  }
}

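// Post-barrier for oop array copies: saves the call-clobbered registers,
// moves (addr, count) into the first two argument registers, and calls
// G1BarrierSetRuntime::write_ref_array_post_entry for the copied range.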
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register tmp) {
  __ push_call_clobbered_registers(false /* save_fpu */);
#ifdef _LP64
  if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
    assert_different_registers(c_rarg1, addr);
    __ mov(c_rarg1, count);
    __ mov(c_rarg0, addr);
  } else {
    assert_different_registers(c_rarg0, count);
    __ mov(c_rarg0, addr);
    __ mov(c_rarg1, count);
  }
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
#else
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry),
                  addr, count);
#endif
  __ pop_call_clobbered_registers(false /* save_fpu */);
}

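// Loads with the ON_WEAK_OOP_REF / ON_PHANTOM_OOP_REF decorators (e.g. through
// Reference.get()) additionally run the SATB pre-barrier on the loaded
// referent, so that a referent handed out during concurrent marking is kept
// alive by the marking cycle.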
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (on_oop && on_reference) {
    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);

#ifndef _LP64
    // Work around the x86_32 bug that only manifests with Loom for some reason.
    // MacroAssembler::resolve_weak_handle calls this barrier with tmp_thread == noreg.
    if (thread == noreg) {
      if (dst != rcx && tmp1 != rcx) {
        thread = rcx;
      } else if (dst != rdx && tmp1 != rdx) {
        thread = rdx;
      } else if (dst != rdi && tmp1 != rdi) {
        thread = rdi;
      }
    }
    assert_different_registers(dst, tmp1, thread);
    __ push(thread);
    __ get_thread(thread);
#endif

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         thread /* thread */,
                         tmp1 /* tmp */,
                         true /* tosca_live */,
                         true /* expand_call */);

#ifndef _LP64
    __ pop(thread);
#endif
  }
}

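// G1 SATB pre-barrier. If concurrent marking is active, records the previous
// value of the field (pre_val, loaded from obj if obj != noreg) in the
// thread-local SATB buffer. When the buffer is full, it falls back to the
// G1BarrierSetRuntime::write_ref_field_pre_entry runtime call.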
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ cmpl(in_progress, 0);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ cmpb(in_progress, 0);
  }
  __ jcc(Assembler::equal, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);                   // tmp := *index_adr
  __ cmpptr(tmp, 0);                       // tmp == 0?
  __ jcc(Assembler::equal, runtime);       // If yes, goto runtime

  __ subptr(tmp, wordSize);                // tmp := tmp - wordSize
  __ movptr(index, tmp);                   // *index_adr := tmp
  __ addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  // Determine and save the live input values
  __ push_call_clobbered_registers();

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    if (c_rarg0 != pre_val) {
      __ mov(c_rarg0, pre_val);
    }
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_call_clobbered_registers();

  __ bind(done);
}

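// G1 post-barrier. Skips the barrier if the store does not cross heap regions,
// stores null, or hits a card that is already young or dirty. Otherwise it
// dirties the card and enqueues its address in the thread-local dirty card
// queue, calling G1BarrierSetRuntime::write_ref_field_post_entry when the
// queue buffer is full.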
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp,
                                                  Register tmp2) {
  // Generated code assumes that buffer index is pointer sized.
  STATIC_ASSERT(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t));
#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  CardTableBarrierSet* ct =
    barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  Label done;
  Label runtime;

  // Does the store cross heap regions?

  __ movptr(tmp, store_addr);
  __ xorptr(tmp, new_val);
  __ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  __ jcc(Assembler::equal, done);

  // Crosses regions; storing null?

  __ cmpptr(new_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Storing a region-crossing, non-null oop; is the card already dirty?

  const Register card_addr = tmp;
  const Register cardtable = tmp2;

  __ movptr(card_addr, store_addr);
  __ shrptr(card_addr, CardTable::card_shift());
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
  __ addptr(card_addr, cardtable);

  __ cmpb(Address(card_addr, 0), G1CardTable::g1_young_card_val());
  __ jcc(Assembler::equal, done);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ cmpb(Address(card_addr, 0), G1CardTable::dirty_card_val());
  __ jcc(Assembler::equal, done);

  // Storing a region-crossing, non-null oop and the card is clean:
  // dirty the card and log it.

  __ movb(Address(card_addr, 0), G1CardTable::dirty_card_val());

  __ movptr(tmp2, queue_index);
  __ testptr(tmp2, tmp2);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp2, wordSize);
  __ movptr(queue_index, tmp2);
  __ addptr(tmp2, buffer);
  __ movptr(Address(tmp2, 0), card_addr);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(store_addr NOT_LP64(COMMA thread));
  __ push_set(saved);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop_set(saved);

  __ bind(done);
}

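// Interpreter/runtime oop store: flattens the destination address into tmp1,
// applies the SATB pre-barrier for AS_NORMAL accesses, performs the store,
// and emits the post-barrier only for heap stores of an actual value
// (val != noreg).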
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;

  bool needs_pre_barrier = as_normal;
  bool needs_post_barrier = val != noreg && in_heap;

  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  // flatten object address if needed
  // We do it regardless of precise because we need the registers
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

#ifndef _LP64
  InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
#endif

  NOT_LP64(__ get_thread(rcx));
  NOT_LP64(imasm->save_bcp());

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm /*masm*/,
                         tmp1 /* obj */,
                         tmp2 /* pre_val */,
                         rthread /* thread */,
                         tmp3  /* tmp */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }
  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
  } else {
    Register new_val = val;
    if (needs_post_barrier) {
      // G1 barrier needs uncompressed oop for region cross check.
      if (UseCompressedOops) {
        new_val = tmp2;
        __ movptr(new_val, val);
      }
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm /*masm*/,
                            tmp1 /* store_adr */,
                            new_val /* new_val */,
                            rthread /* thread */,
                            tmp3 /* tmp */,
                            tmp2 /* tmp2 */);
    }
  }
  NOT_LP64(imasm->restore_bcp());
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

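// C1 slow-path stub for the SATB pre-barrier: emits the load of the previous
// value if the stub requested it, filters null pre-values, and otherwise
// passes the pre-value to the pre-barrier runtime code blob.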
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

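// C1 slow-path stub for the post-barrier: filters null new values, then passes
// the store address to the post-barrier runtime code blob.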
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cmpptr(new_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

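// Runtime code blob behind the C1 pre-barrier slow path: tries to enqueue the
// previous value (stub argument 0) in the thread-local SATB buffer, and calls
// G1BarrierSetRuntime::write_ref_field_pre_entry when the buffer is full.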
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Generated code assumes that buffer index is pointer sized.
  STATIC_ASSERT(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t));

  __ prologue("g1_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ cmpl(queue_active, 0);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ cmpb(queue_active, 0);
  }
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ push_call_clobbered_registers();

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), rcx, thread);

  __ pop_call_clobbered_registers();

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

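// Runtime code blob behind the C1 post-barrier slow path: computes the card for
// the store address (stub argument 0), and if the card is neither young nor
// already dirty, dirties it and enqueues it in the thread-local dirty card
// queue, calling G1BarrierSetRuntime::write_ref_field_post_entry when the
// buffer is full.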
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_post_barrier", false);

  CardTableBarrierSet* ct =
    barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  Label done;
  Label enqueued;
  Label runtime;

  // At this point we know new_value is non-null and that the store crosses regions.
  // We must check whether the card is already dirty.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  __ push(rax);
  __ push(rcx);

  const Register cardtable = rax;
  const Register card_addr = rcx;

  __ load_parameter(0, card_addr);
  __ shrptr(card_addr, CardTable::card_shift());
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
  __ addptr(card_addr, cardtable);

  NOT_LP64(__ get_thread(thread);)

  __ cmpb(Address(card_addr, 0), G1CardTable::g1_young_card_val());
  __ jcc(Assembler::equal, done);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ cmpb(Address(card_addr, 0), CardTable::dirty_card_val());
  __ jcc(Assembler::equal, done);

  // Storing a region-crossing, non-null oop and the card is clean:
  // dirty the card and log it.

  __ movb(Address(card_addr, 0), CardTable::dirty_card_val());

  const Register tmp = rdx;
  __ push(rdx);

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);
  __ movptr(Address(tmp, 0), card_addr);
  __ jmp(enqueued);

  __ bind(runtime);
  __ push_call_clobbered_registers();

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);

  __ pop_call_clobbered_registers();

  __ bind(enqueued);
  __ pop(rdx);

  __ bind(done);
  __ pop(rcx);
  __ pop(rax);

  __ epilogue();
}

#undef __

#endif // COMPILER1
--- EOF ---