/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/barrierSetRuntime.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef COMPILER2
#include "code/vmreg.inline.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2


#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp2) {

  // LR is live.  It must be saved around calls.
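  // The IN_HEAP / IN_NATIVE / IS_NOT_NULL decorators checked below select
  // between a (possibly compressed) heap oop load and a raw uncompressed
  // native load, and allow the cheaper not-null decode when applicable.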

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
        if (UseCompressedOops) {
          __ strw(zr, dst);
        } else {
          __ str(zr, dst);
        }
      } else {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ strw(val, dst);
        } else {
          __ str(val, dst);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0, dst);  break;
  case T_DOUBLE:  __ strd(v0, dst);  break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet decorators,
                                          Register src, Register dst, Register inline_layout_info) {
  // The flat_field_copy implementation is fairly complex, and there are no
  // "short-cuts" to be made from asm. What there is appears to have the same
  // cost in C++, so just "call_VM_leaf" for now rather than maintain hundreds
  // of hand-rolled instructions...
  if (decorators & IS_DEST_UNINITIALIZED) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, inline_layout_info);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, inline_layout_info);
  }
}

void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       Register dst1,
                                       Register dst2,
                                       Address src,
                                       Register tmp) {
  if (bytes == 1) {
    assert(dst2 == noreg, "invariant");
    __ ldrb(dst1, src);
  } else if (bytes == 2) {
    assert(dst2 == noreg, "invariant");
    __ ldrh(dst1, src);
  } else if (bytes == 4) {
    assert(dst2 == noreg, "invariant");
    __ ldrw(dst1, src);
  } else if (bytes == 8) {
    assert(dst2 == noreg, "invariant");
    __ ldr(dst1, src);
  } else if (bytes == 16) {
    assert(dst2 != noreg, "invariant");
    assert(dst2 != dst1, "invariant");
    __ ldp(dst1, dst2, src);
  } else {
    // Not the right size
    ShouldNotReachHere();
  }
  if ((decorators & ARRAYCOPY_CHECKCAST) != 0 && UseCompressedOops) {
    __ decode_heap_oop(dst1);
  }
}

void BarrierSetAssembler::copy_store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        size_t bytes,
                                        Address dst,
                                        Register src1,
                                        Register src2,
                                        Register tmp1,
                                        Register tmp2,
                                        Register tmp3) {
  if ((decorators & ARRAYCOPY_CHECKCAST) != 0 && UseCompressedOops) {
    __ encode_heap_oop(src1);
  }
  if (bytes == 1) {
    assert(src2 == noreg, "invariant");
    __ strb(src1, dst);
  } else if (bytes == 2) {
    assert(src2 == noreg, "invariant");
    __ strh(src1, dst);
  } else if (bytes == 4) {
    assert(src2 == noreg, "invariant");
    __ strw(src1, dst);
  } else if (bytes == 8) {
    assert(src2 == noreg, "invariant");
    __ str(src1, dst);
  } else if (bytes == 16) {
    assert(src2 != noreg, "invariant");
    assert(src2 != src1, "invariant");
    __ stp(src1, src2, dst);
  } else {
    // Not the right size
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       FloatRegister dst1,
                                       FloatRegister dst2,
                                       Address src,
                                       Register tmp1,
                                       Register tmp2,
                                       FloatRegister vec_tmp) {
  if (bytes == 32) {
    __ ldpq(dst1, dst2, src);
  } else {
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::copy_store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        size_t bytes,
                                        Address dst,
                                        FloatRegister src1,
                                        FloatRegister src2,
                                        Register tmp1,
                                        Register tmp2,
                                        Register tmp3,
                                        FloatRegister vec_tmp1,
                                        FloatRegister vec_tmp2,
                                        FloatRegister vec_tmp3) {
  if (bytes == 32) {
    __ stpq(src1, src2, dst);
  } else {
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::tag_mask == 0b11);
  __ andr(obj, obj, ~JNIHandles::tag_mask);
  __ ldr(obj, Address(obj, 0));   // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
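// Bump-pointer allocation from the current thread's TLAB: obj = tlab_top,
// end = obj + size; branch to slow_case if end is above tlab_end, otherwise
// publish end as the new tlab_top.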
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

static volatile uint32_t _patching_epoch = 0;

address BarrierSetAssembler::patching_epoch_addr() {
  return (address)&_patching_epoch;
}

void BarrierSetAssembler::increment_patching_epoch() {
  Atomic::inc(&_patching_epoch);
}

void BarrierSetAssembler::clear_patching_epoch() {
  _patching_epoch = 0;
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == nullptr) {
    return;
  }

  Label local_guard;
  Label skip_barrier;
  NMethodPatchingType patching_type = nmethod_patching_type();

  if (slow_path == nullptr) {
    guard = &local_guard;
  }

  // If the slow path is out of line in a stub, we flip the condition
  Assembler::Condition condition = slow_path == nullptr ? Assembler::EQ : Assembler::NE;
  Label& barrier_target = slow_path == nullptr ? skip_barrier : *slow_path;

  __ ldrw(rscratch1, *guard);

  if (patching_type == NMethodPatchingType::stw_instruction_and_data_patch) {
    // With STW patching, no data or instructions are updated concurrently,
    // which means there isn't really any need for fencing, for either data
    // or instruction modifications happening concurrently. The instruction
    // patching is handled with isb fences on the way back from the safepoint
    // to Java. So here we can do a plain conditional branch with no fencing.
    Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldrw(rscratch2, thread_disarmed_addr);
    __ cmp(rscratch1, rscratch2);
  } else if (patching_type == NMethodPatchingType::conc_instruction_and_data_patch) {
    // If we patch code we need both a code patching and a loadload
    // fence. It's not super cheap, so we use a global epoch mechanism
    // to hide them in a slow path.
    // The high level idea of the global epoch mechanism is to detect
    // when any thread has performed the required fencing, after the
    // last nmethod was disarmed. This implies that the required
    // fencing has been performed for all preceding nmethod disarms
    // as well. Therefore, we do not need any further fencing.
    __ lea(rscratch2, ExternalAddress((address)&_patching_epoch));
    // Embed an artificial data dependency to order the guard load
    // before the epoch load.
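    // rscratch1 holds the 32-bit guard value, zero-extended by ldrw, so
    // shifting it right by 32 always contributes zero: the orr below leaves
    // the epoch address in rscratch2 unchanged while making the epoch load
    // address-dependent on the guard load.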
    __ orr(rscratch2, rscratch2, rscratch1, Assembler::LSR, 32);
    // Read the global epoch value.
    __ ldrw(rscratch2, rscratch2);
    // Combine the guard value (low order) with the epoch value (high order).
    __ orr(rscratch1, rscratch1, rscratch2, Assembler::LSL, 32);
    // Compare the global values with the thread-local values.
    Address thread_disarmed_and_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldr(rscratch2, thread_disarmed_and_epoch_addr);
    __ cmp(rscratch1, rscratch2);
  } else {
    assert(patching_type == NMethodPatchingType::conc_data_patch, "must be");
    // Subsequent loads of oops must occur after load of guard value.
    // BarrierSetNMethod::disarm sets guard with release semantics.
    __ membar(__ LoadLoad);
    Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldrw(rscratch2, thread_disarmed_addr);
    __ cmpw(rscratch1, rscratch2);
  }
  __ br(condition, barrier_target);

  if (slow_path == nullptr) {
    __ lea(rscratch1, RuntimeAddress(StubRoutines::method_entry_barrier()));
    __ blr(rscratch1);
    __ b(skip_barrier);

    __ bind(local_guard);

    __ emit_int32(0);   // nmethod guard value. Skipped over in common case.
  } else {
    __ bind(*continuation);
  }

  __ bind(skip_barrier);
}

void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == nullptr) {
    return;
  }

  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldrw(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_ref_count_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  __ push(RegSet::of(r10), sp);
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  __ resolve_weak_handle(r10, rscratch1, rscratch2);
  __ mov(rscratch1, r10);
  __ pop(RegSet::of(r10), sp);
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}

void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
  // Check if the oop is in the right area of memory
  __ mov(tmp2, (intptr_t) Universe::verify_oop_mask());
  __ andr(tmp1, obj, tmp2);
  __ mov(tmp2, (intptr_t) Universe::verify_oop_bits());

  // Compare tmp1 and tmp2.  We don't use a compare
  // instruction here because the flags register is live.
  __ eor(tmp1, tmp1, tmp2);
  __ cbnz(tmp1, error);

  // make sure klass is 'reasonable', which is not zero.
  __ load_klass(obj, obj); // get klass
  __ cbz(obj, error);      // if klass is null it is broken
}

#ifdef COMPILER2

OptoReg::Name BarrierSetAssembler::encode_float_vector_register_size(const Node* node, OptoReg::Name opto_reg) {
  switch (node->ideal_reg()) {
    case Op_RegF:
      // No need to refine. The original encoding is already fine to distinguish.
      assert(opto_reg % 4 == 0, "Float register should only occupy a single slot");
      break;
    // Use different encoding values of the same fp/vector register to help distinguish different sizes.
    // For example V16: the OptoReg::Name and its corresponding slot values are
    // "V16": 64, "V16_H": 65, "V16_J": 66, "V16_K": 67.
    case Op_RegD:
    case Op_VecD:
      opto_reg &= ~3;
      opto_reg |= 1;
      break;
    case Op_VecX:
      opto_reg &= ~3;
      opto_reg |= 2;
      break;
    case Op_VecA:
      opto_reg &= ~3;
      opto_reg |= 3;
      break;
    default:
      assert(false, "unexpected ideal register");
      ShouldNotReachHere();
  }
  return opto_reg;
}

OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_FloatRegister()) {
    opto_reg = encode_float_vector_register_size(node, opto_reg);
  }

  return opto_reg;
}

#undef __
#define __ _masm->

void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
  int index = -1;
  GrowableArray<RegisterData> registers;
  VMReg prev_vm_reg = VMRegImpl::Bad();

  RegMaskIterator rmi(stub->preserve_set());
  while (rmi.has_next()) {
    OptoReg::Name opto_reg = rmi.next();
    VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

    if (vm_reg->is_Register()) {
      // A GPR may have one or two slots in the regmask.
      // Determine whether the current vm_reg is the same physical register as the previous one.
      if (is_same_register(vm_reg, prev_vm_reg)) {
        registers.at(index)._slots++;
      } else {
        RegisterData reg_data = { vm_reg, 1 };
        index = registers.append(reg_data);
      }
    } else if (vm_reg->is_FloatRegister()) {
      // We have size encoding in OptoReg of stub->preserve_set().
      // After encoding, a float/neon/sve register has only one slot in the regmask.
      // Decode it to get the actual size.
      VMReg vm_reg_base = vm_reg->as_FloatRegister()->as_VMReg();
      int slots = decode_float_vector_register_size(opto_reg);
      RegisterData reg_data = { vm_reg_base, slots };
      index = registers.append(reg_data);
    } else if (vm_reg->is_PRegister()) {
      // A PRegister has only one slot in the regmask.
      RegisterData reg_data = { vm_reg, 1 };
      index = registers.append(reg_data);
    } else {
      assert(false, "Unknown register type");
      ShouldNotReachHere();
    }
    prev_vm_reg = vm_reg;
  }

  // Record registers that need to be saved/restored
  for (GrowableArrayIterator<RegisterData> it = registers.begin(); it != registers.end(); ++it) {
    RegisterData reg_data = *it;
    VMReg vm_reg = reg_data._reg;
    int slots = reg_data._slots;
    if (vm_reg->is_Register()) {
      assert(slots == 1 || slots == 2, "Unexpected register save size");
      _gp_regs += RegSet::of(vm_reg->as_Register());
    } else if (vm_reg->is_FloatRegister()) {
      if (slots == 1 || slots == 2) {
        _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else if (slots == 4) {
        _neon_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else {
        assert(slots == Matcher::scalable_vector_reg_size(T_FLOAT), "Unexpected register save size");
        _sve_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      }
    } else {
      assert(vm_reg->is_PRegister() && slots == 1, "Unknown register type");
      _p_regs += PRegSet::of(vm_reg->as_PRegister());
    }
  }

  // Remove C-ABI SOE registers and scratch regs
  _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9);

  // Remove C-ABI SOE fp registers
  _fp_regs -= FloatRegSet::range(v8, v15);
}

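// Classify a VMReg for is_same_register(): general purpose, float/vector,
// SVE predicate, stack slot, or rc_bad for anything else.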
enum RC SaveLiveRegisters::rc_class(VMReg reg) {
  if (reg->is_reg()) {
    if (reg->is_Register()) {
      return rc_int;
    } else if (reg->is_FloatRegister()) {
      return rc_float;
    } else if (reg->is_PRegister()) {
      return rc_predicate;
    }
  }
  if (reg->is_stack()) {
    return rc_stack;
  }
  return rc_bad;
}

bool SaveLiveRegisters::is_same_register(VMReg reg1, VMReg reg2) {
  if (reg1 == reg2) {
    return true;
  }
  if (rc_class(reg1) == rc_class(reg2)) {
    if (reg1->is_Register()) {
      return reg1->as_Register() == reg2->as_Register();
    } else if (reg1->is_FloatRegister()) {
      return reg1->as_FloatRegister() == reg2->as_FloatRegister();
    } else if (reg1->is_PRegister()) {
      return reg1->as_PRegister() == reg2->as_PRegister();
    }
  }
  return false;
}

int SaveLiveRegisters::decode_float_vector_register_size(OptoReg::Name opto_reg) {
  switch (opto_reg & 3) {
    case 0:
      return 1;
    case 1:
      return 2;
    case 2:
      return 4;
    case 3:
      return Matcher::scalable_vector_reg_size(T_FLOAT);
    default:
      ShouldNotReachHere();
      return 0;
  }
}

SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
  : _masm(masm),
    _gp_regs(),
    _fp_regs(),
    _neon_regs(),
    _sve_regs(),
    _p_regs() {

  // Figure out what registers to save/restore
  initialize(stub);

  // Save registers
  __ push(_gp_regs, sp);
  __ push_fp(_fp_regs, sp, MacroAssembler::PushPopFp);
  __ push_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ push_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ push_p(_p_regs, sp);
}

SaveLiveRegisters::~SaveLiveRegisters() {
  // Restore registers
  __ pop_p(_p_regs, sp);
  __ pop_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ pop_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ pop_fp(_fp_regs, sp, MacroAssembler::PushPopFp);

  // External runtime call may clobber ptrue reg
  __ reinitialize_ptrue();

  __ pop(_gp_regs, sp);
}

#endif // COMPILER2