/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/barrierSetRuntime.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef COMPILER2
#include "code/vmreg.inline.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2


#define __ masm->

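// Plain (no-barrier) load used by the shared code paths. GC-specific barrier
// set assemblers override this to add their pre/post barrier work; the base
// version just performs the raw access and decodes compressed heap oops into dst.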
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp2) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

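// Plain (no-barrier) store. For heap oops this encodes compressed oops before
// the store; a noreg val means "store null" and is written as zr.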
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
        if (UseCompressedOops) {
          __ strw(zr, dst);
        } else {
          __ str(zr, dst);
        }
      } else {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ strw(val, dst);
        } else {
          __ str(val, dst);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0,  dst); break;
  case T_DOUBLE:  __ strd(v0,  dst); break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::value_copy(MacroAssembler* masm, DecoratorSet decorators,
                                     Register src, Register dst, Register value_klass) {
  // The value_copy implementation is fairly complex, and there are no real
  // short-cuts to be had from asm. What there is appears to have the same
  // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds
  // of hand-rolled instructions...
  if (decorators & IS_DEST_UNINITIALIZED) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, value_klass);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, value_klass);
  }
}

void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet decorators,
                                     Register src, Register dst, Register inline_layout_info) {
  // The flat_field_copy implementation is fairly complex, and there are no real
  // short-cuts to be had from asm. What there is appears to have the same
  // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds
  // of hand-rolled instructions...
  if (decorators & IS_DEST_UNINITIALIZED) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized2), src, dst, inline_layout_info);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy2), src, dst, inline_layout_info);
  }
}

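// Arraycopy element load helper: `bytes` selects the access width (1, 2, 4 or
// 8 bytes, or 16 for a paired load into dst1/dst2). For checkcast arraycopy
// with compressed oops, the loaded narrow oop is decoded into its full form.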
void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       Register dst1,
                                       Register dst2,
                                       Address src,
                                       Register tmp) {
  if (bytes == 1) {
    assert(dst2 == noreg, "invariant");
    __ ldrb(dst1, src);
  } else if (bytes == 2) {
    assert(dst2 == noreg, "invariant");
    __ ldrh(dst1, src);
  } else if (bytes == 4) {
    assert(dst2 == noreg, "invariant");
    __ ldrw(dst1, src);
  } else if (bytes == 8) {
    assert(dst2 == noreg, "invariant");
    __ ldr(dst1, src);
  } else if (bytes == 16) {
    assert(dst2 != noreg, "invariant");
    assert(dst2 != dst1, "invariant");
    __ ldp(dst1, dst2, src);
  } else {
    // Not the right size
    ShouldNotReachHere();
  }
  if ((decorators & ARRAYCOPY_CHECKCAST) != 0 && UseCompressedOops) {
    __ decode_heap_oop(dst1);
  }
}

void BarrierSetAssembler::copy_store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        size_t bytes,
                                        Address dst,
                                        Register src1,
                                        Register src2,
                                        Register tmp1,
                                        Register tmp2,
                                        Register tmp3) {
  if ((decorators & ARRAYCOPY_CHECKCAST) != 0 && UseCompressedOops) {
    __ encode_heap_oop(src1);
  }
  if (bytes == 1) {
    assert(src2 == noreg, "invariant");
    __ strb(src1, dst);
  } else if (bytes == 2) {
    assert(src2 == noreg, "invariant");
    __ strh(src1, dst);
  } else if (bytes == 4) {
    assert(src2 == noreg, "invariant");
    __ strw(src1, dst);
  } else if (bytes == 8) {
    assert(src2 == noreg, "invariant");
    __ str(src1, dst);
  } else if (bytes == 16) {
    assert(src2 != noreg, "invariant");
    assert(src2 != src1, "invariant");
    __ stp(src1, src2, dst);
  } else {
    // Not the right size
    ShouldNotReachHere();
  }
}

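// SIMD variants of the arraycopy helpers: only 32-byte accesses, done as a
// load/store pair of Q registers, are supported here.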
void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       FloatRegister dst1,
                                       FloatRegister dst2,
                                       Address src,
                                       Register tmp1,
                                       Register tmp2,
                                       FloatRegister vec_tmp) {
  if (bytes == 32) {
    __ ldpq(dst1, dst2, src);
  } else {
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::copy_store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        size_t bytes,
                                        Address dst,
                                        FloatRegister src1,
                                        FloatRegister src2,
                                        Register tmp1,
                                        Register tmp2,
                                        Register tmp3,
                                        FloatRegister vec_tmp1,
                                        FloatRegister vec_tmp2,
                                        FloatRegister vec_tmp3) {
  if (bytes == 32) {
    __ stpq(src1, src2, dst);
  } else {
    ShouldNotReachHere();
  }
}

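// Fast path for resolving a jobject in native code: clear the handle tag bits
// and load through the handle. Barrier sets that need a read barrier override
// this and make use of the slowpath label.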
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::tag_mask == 0b11);
  __ andr(obj, obj, ~JNIHandles::tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

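// Global epoch used by the conc_instruction_and_data_patch scheme; see
// nmethod_entry_barrier() below for how it is combined with the per-nmethod
// guard value to avoid redundant fencing.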
static volatile uint32_t _patching_epoch = 0;

address BarrierSetAssembler::patching_epoch_addr() {
  return (address)&_patching_epoch;
}

void BarrierSetAssembler::increment_patching_epoch() {
  Atomic::inc(&_patching_epoch);
}

void BarrierSetAssembler::clear_patching_epoch() {
  _patching_epoch = 0;
}

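// nmethod entry barrier: compare the nmethod's guard value against the
// thread-local disarmed value and branch to the (possibly out-of-line) slow
// path when they differ. The fencing strategy depends on how nmethods are
// patched on this platform.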
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == nullptr) {
    return;
  }

  Label local_guard;
  Label skip_barrier;
  NMethodPatchingType patching_type = nmethod_patching_type();

  if (slow_path == nullptr) {
    guard = &local_guard;
  }

  // If the slow path is out of line in a stub, we flip the condition
  Assembler::Condition condition = slow_path == nullptr ? Assembler::EQ : Assembler::NE;
  Label& barrier_target = slow_path == nullptr ? skip_barrier : *slow_path;

  __ ldrw(rscratch1, *guard);

  if (patching_type == NMethodPatchingType::stw_instruction_and_data_patch) {
    // With STW patching, no data or instructions are updated concurrently,
    // so no fencing is needed against concurrent data or instruction
    // modifications. The instruction patching is handled with isb fences
    // on the way back from the safepoint to Java, so here we can do a
    // plain conditional branch with no fencing.
    Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldrw(rscratch2, thread_disarmed_addr);
    __ cmp(rscratch1, rscratch2);
  } else if (patching_type == NMethodPatchingType::conc_instruction_and_data_patch) {
    // If we patch code we need both a code patching and a loadload
    // fence. It's not super cheap, so we use a global epoch mechanism
    // to hide them in a slow path.
    // The high level idea of the global epoch mechanism is to detect
    // when any thread has performed the required fencing, after the
    // last nmethod was disarmed. This implies that the required
    // fencing has been performed for all preceding nmethod disarms
    // as well. Therefore, we do not need any further fencing.
    __ lea(rscratch2, ExternalAddress((address)&_patching_epoch));
    // Embed an artificial data dependency to order the guard load
    // before the epoch load.
    __ orr(rscratch2, rscratch2, rscratch1, Assembler::LSR, 32);
    // Read the global epoch value.
    __ ldrw(rscratch2, rscratch2);
    // Combine the guard value (low order) with the epoch value (high order).
    __ orr(rscratch1, rscratch1, rscratch2, Assembler::LSL, 32);
    // Compare the global values with the thread-local values.
    Address thread_disarmed_and_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldr(rscratch2, thread_disarmed_and_epoch_addr);
    __ cmp(rscratch1, rscratch2);
  } else {
    assert(patching_type == NMethodPatchingType::conc_data_patch, "must be");
    // Subsequent loads of oops must occur after load of guard value.
    // BarrierSetNMethod::disarm sets guard with release semantics.
    __ membar(__ LoadLoad);
    Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldrw(rscratch2, thread_disarmed_addr);
    __ cmpw(rscratch1, rscratch2);
  }
  __ br(condition, barrier_target);

  if (slow_path == nullptr) {
    __ lea(rscratch1, RuntimeAddress(StubRoutines::method_entry_barrier()));
    __ blr(rscratch1);
    __ b(skip_barrier);

    __ bind(local_guard);

    __ emit_int32(0);   // nmethod guard value. Skipped over in common case.
  } else {
    __ bind(*continuation);
  }

  __ bind(skip_barrier);
}

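// c2i entry barrier: before dispatching through a c2i adapter, verify that the
// callee's holder class loader is still alive; otherwise (or when rmethod is
// null) jump to the "wrong method" stub.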
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == nullptr) {
    return;
  }

  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldrw(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_ref_count_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  __ push(RegSet::of(r10), sp);
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  __ resolve_weak_handle(r10, rscratch1, rscratch2);
  __ mov(rscratch1, r10);
  __ pop(RegSet::of(r10), sp);
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}

void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
  // Check if the oop is in the right area of memory
  __ mov(tmp2, (intptr_t) Universe::verify_oop_mask());
  __ andr(tmp1, obj, tmp2);
  __ mov(tmp2, (intptr_t) Universe::verify_oop_bits());

  // Compare tmp1 and tmp2.  We don't use a compare
  // instruction here because the flags register is live.
  __ eor(tmp1, tmp1, tmp2);
  __ cbnz(tmp1, error);

  // Make sure the klass is 'reasonable', i.e. non-null.
  __ load_klass(obj, obj); // get klass
  __ cbz(obj, error);      // if klass is null it is broken
}

#ifdef COMPILER2

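// Encode the register width into the low two bits of the OptoReg name so that
// SaveLiveRegisters can later recover how many slots to preserve (see
// decode_float_vector_register_size()).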
OptoReg::Name BarrierSetAssembler::encode_float_vector_register_size(const Node* node, OptoReg::Name opto_reg) {
  switch (node->ideal_reg()) {
    case Op_RegF:
      // No need to refine. The original encoding is already fine to distinguish.
      assert(opto_reg % 4 == 0, "Float register should only occupy a single slot");
      break;
    // Use different encoding values of the same fp/vector register to help distinguish different sizes.
    // For example, for V16 the OptoReg names and their corresponding slot values are
    // "V16": 64, "V16_H": 65, "V16_J": 66, "V16_K": 67.
    case Op_RegD:
    case Op_VecD:
      opto_reg &= ~3;
      opto_reg |= 1;
      break;
    case Op_VecX:
      opto_reg &= ~3;
      opto_reg |= 2;
      break;
    case Op_VecA:
      opto_reg &= ~3;
      opto_reg |= 3;
      break;
    default:
      assert(false, "unexpected ideal register");
      ShouldNotReachHere();
  }
  return opto_reg;
}

OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_FloatRegister()) {
    opto_reg = encode_float_vector_register_size(node, opto_reg);
  }

  return opto_reg;
}

#undef __
#define __ _masm->

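// Walk the stub's preserve set and partition the live registers into GP,
// FP/NEON/SVE and predicate register sets, then drop the C-ABI callee-saved
// and scratch registers, which do not need to be preserved around the call.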
void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
  int index = -1;
  GrowableArray<RegisterData> registers;
  VMReg prev_vm_reg = VMRegImpl::Bad();

  RegMaskIterator rmi(stub->preserve_set());
  while (rmi.has_next()) {
    OptoReg::Name opto_reg = rmi.next();
    VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

    if (vm_reg->is_Register()) {
      // GPR may have one or two slots in regmask
      // Determine whether the current vm_reg is the same physical register as the previous one
      if (is_same_register(vm_reg, prev_vm_reg)) {
        registers.at(index)._slots++;
      } else {
        RegisterData reg_data = { vm_reg, 1 };
        index = registers.append(reg_data);
      }
    } else if (vm_reg->is_FloatRegister()) {
      // We have size encoding in OptoReg of stub->preserve_set()
      // After encoding, float/neon/sve register has only one slot in regmask
      // Decode it to get the actual size
      VMReg vm_reg_base = vm_reg->as_FloatRegister()->as_VMReg();
      int slots = decode_float_vector_register_size(opto_reg);
      RegisterData reg_data = { vm_reg_base, slots };
      index = registers.append(reg_data);
    } else if (vm_reg->is_PRegister()) {
      // PRegister has only one slot in regmask
      RegisterData reg_data = { vm_reg, 1 };
      index = registers.append(reg_data);
    } else {
      assert(false, "Unknown register type");
      ShouldNotReachHere();
    }
    prev_vm_reg = vm_reg;
  }

  // Record registers that need to be saved/restored
  for (GrowableArrayIterator<RegisterData> it = registers.begin(); it != registers.end(); ++it) {
    RegisterData reg_data = *it;
    VMReg vm_reg = reg_data._reg;
    int slots = reg_data._slots;
    if (vm_reg->is_Register()) {
      assert(slots == 1 || slots == 2, "Unexpected register save size");
      _gp_regs += RegSet::of(vm_reg->as_Register());
    } else if (vm_reg->is_FloatRegister()) {
      if (slots == 1 || slots == 2) {
        _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else if (slots == 4) {
        _neon_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else {
        assert(slots == Matcher::scalable_vector_reg_size(T_FLOAT), "Unexpected register save size");
        _sve_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      }
    } else {
      assert(vm_reg->is_PRegister() && slots == 1, "Unknown register type");
      _p_regs += PRegSet::of(vm_reg->as_PRegister());
    }
  }

  // Remove C-ABI SOE registers and scratch regs
  _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9);

  // Remove C-ABI SOE fp registers
  _fp_regs -= FloatRegSet::range(v8, v15);
}

enum RC SaveLiveRegisters::rc_class(VMReg reg) {
  if (reg->is_reg()) {
    if (reg->is_Register()) {
      return rc_int;
    } else if (reg->is_FloatRegister()) {
      return rc_float;
    } else if (reg->is_PRegister()) {
      return rc_predicate;
    }
  }
  if (reg->is_stack()) {
    return rc_stack;
  }
  return rc_bad;
}

bool SaveLiveRegisters::is_same_register(VMReg reg1, VMReg reg2) {
  if (reg1 == reg2) {
    return true;
  }
  if (rc_class(reg1) == rc_class(reg2)) {
    if (reg1->is_Register()) {
      return reg1->as_Register() == reg2->as_Register();
    } else if (reg1->is_FloatRegister()) {
      return reg1->as_FloatRegister() == reg2->as_FloatRegister();
    } else if (reg1->is_PRegister()) {
      return reg1->as_PRegister() == reg2->as_PRegister();
    }
  }
  return false;
}

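// Inverse of encode_float_vector_register_size(): map the low two bits of the
// OptoReg name back to the number of 32-bit slots that must be preserved.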
int SaveLiveRegisters::decode_float_vector_register_size(OptoReg::Name opto_reg) {
  switch (opto_reg & 3) {
    case 0:
      return 1;
    case 1:
      return 2;
    case 2:
      return 4;
    case 3:
      return Matcher::scalable_vector_reg_size(T_FLOAT);
    default:
      ShouldNotReachHere();
      return 0;
  }
}

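// RAII helper: saves the stub's live registers on construction and restores
// them in the destructor. ptrue is reinitialized on the way out since the
// runtime call may have clobbered it.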
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
  : _masm(masm),
    _gp_regs(),
    _fp_regs(),
    _neon_regs(),
    _sve_regs(),
    _p_regs() {

  // Figure out what registers to save/restore
  initialize(stub);

  // Save registers
  __ push(_gp_regs, sp);
  __ push_fp(_fp_regs, sp, MacroAssembler::PushPopFp);
  __ push_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ push_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ push_p(_p_regs, sp);
}

SaveLiveRegisters::~SaveLiveRegisters() {
  // Restore registers
  __ pop_p(_p_regs, sp);
  __ pop_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ pop_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ pop_fp(_fp_regs, sp, MacroAssembler::PushPopFp);

  // External runtime call may clobber ptrue reg
  __ reinitialize_ptrue();

  __ pop(_gp_regs, sp);
}

#endif // COMPILER2