/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//  other Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//        strictly should be 64 bit non-FP/SIMD i.e.
//        0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//        strictly should be 64 bit movz #imm16<<0
//        110___10100 (i.e. requires insn[31:21] == 11010010100)
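//
// As a worked example (illustrative, not exhaustive): the instruction
// word 0x90000008 encodes adrp x8, #0. Its insn[28:26] == 100 places
// it in Data Processing Immediate, insn[30:25] == 001000 dispatches
// it to the adr/adrp subcase in run() below, and insn[31] == 1
// selects the adrp form; the following instruction word then
// determines which two- or three-instruction sequence is being
// processed.
//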
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
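      // Move wide (immediate): the movz; movk; movk sequence emitted
      // by MacroAssembler::movptr().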
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
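    // ADRP works in units of 4k pages: encode the signed delta
    // between this instruction's page and the (possibly adjusted)
    // target's page, split across immlo/immhi just as for adr.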
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}
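
// For example (illustrative): given the pair `adrp x3, <page>;
// ldr x3, [x3, #0x38]`, insn2's size field (bits 31:30) is 0b11, so
// the unsigned 12-bit immediate 7 is scaled by 8 and offset_for()
// reports a byte_offset of 0x38.

// An AArch64Decoder is the inverse of a Patcher: it reads an
// instruction sequence back and reconstructs the target address.
// Patcher::verify() relies on this (via
// MacroAssembler::target_addr_for_insn) to cross-check freshly
// patched code in debug builds.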
class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
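  // adr and adrp split their 21-bit immediate across insn[30:29]
  // (immlo, the low two bits) and insn[23:5] (immhi); the two
  // decoders below reassemble it in that order.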
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                   + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}
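
// n.b. the Patcher above and the patching entry points below rewrite
// instruction words in place; any instruction cache maintenance (see
// ICache) is assumed to be left to the caller.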

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
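
// Calls within the code cache can use a pc-relative (possibly far)
// branch-and-link; a target outside the code cache may be anywhere in
// the address space, so rt_call below reaches it indirectly through a
// register.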
void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2GB (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2GB (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}
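
// The pass_argN helpers below marshal an argument into its C calling
// convention register. n.b. the call_VM variants further down pass
// their arguments in reverse order (arg3 first), and use
// assert_different_registers to check that an argument register is
// not clobbered before its value has been moved into place.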
static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check that the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small,
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movz; movk; movk; movz; movk; movk; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at the absolute buffer offset `target` will be
// aligned according to modulus; pad with nops from the current position.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}
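
// For example, ic_check() above uses
//   align(end_alignment, offset() + ic_check_size());
// so that the code emitted ic_check_size() bytes ahead of the current
// position (the verified entry point) lands on an end_alignment
// boundary.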

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
    - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  // NB! Callers may assume that, when temp2_reg is a valid register,
  // this code sets it to a nonzero value.
1520 1521 assert_different_registers(sub_klass, super_klass, temp_reg); 1522 if (temp2_reg != noreg) 1523 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1524 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1525 1526 Label L_fallthrough; 1527 int label_nulls = 0; 1528 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1529 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1530 assert(label_nulls <= 1, "at most one null in the batch"); 1531 1532 // a couple of useful fields in sub_klass: 1533 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1534 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1535 Address secondary_supers_addr(sub_klass, ss_offset); 1536 Address super_cache_addr( sub_klass, sc_offset); 1537 1538 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1539 1540 // Do a linear scan of the secondary super-klass chain. 1541 // This code is rarely used, so simplicity is a virtue here. 1542 // The repne_scan instruction uses fixed registers, which we must spill. 1543 // Don't worry too much about pre-existing connections with the input regs. 1544 1545 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1546 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1547 1548 RegSet pushed_registers; 1549 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1550 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1551 1552 if (super_klass != r0) { 1553 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1554 } 1555 1556 push(pushed_registers, sp); 1557 1558 // Get super_klass value into r0 (even if it was in r5 or r2). 1559 if (super_klass != r0) { 1560 mov(r0, super_klass); 1561 } 1562 1563 #ifndef PRODUCT 1564 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1565 #endif //PRODUCT 1566 1567 // We will consult the secondary-super array. 1568 ldr(r5, secondary_supers_addr); 1569 // Load the array length. 1570 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1571 // Skip to start of data. 1572 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1573 1574 cmp(sp, zr); // Clear Z flag; SP is never zero 1575 // Scan R2 words at [R5] for an occurrence of R0. 1576 // Set NZ/Z based on last compare. 1577 repne_scan(r5, r0, r2, rscratch1); 1578 1579 // Unspill the temp. registers: 1580 pop(pushed_registers, sp); 1581 1582 br(Assembler::NE, *L_failure); 1583 1584 // Success. Cache the super we found and proceed in triumph. 1585 str(super_klass, super_cache_addr); 1586 1587 if (L_success != &L_fallthrough) { 1588 b(*L_success); 1589 } 1590 1591 #undef IS_A_TEMP 1592 1593 bind(L_fallthrough); 1594 } 1595 1596 // Ensure that the inline code and the stub are using the same registers. 
1597 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1598 do { \ 1599 assert(r_super_klass == r0 && \ 1600 r_array_base == r1 && \ 1601 r_array_length == r2 && \ 1602 (r_array_index == r3 || r_array_index == noreg) && \ 1603 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1604 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1605 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1606 } while(0) 1607 1608 // Return true: we succeeded in generating this code 1609 bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, 1610 Register r_super_klass, 1611 Register temp1, 1612 Register temp2, 1613 Register temp3, 1614 FloatRegister vtemp, 1615 Register result, 1616 u1 super_klass_slot, 1617 bool stub_is_near) { 1618 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1619 1620 Label L_fallthrough; 1621 1622 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1623 1624 const Register 1625 r_array_base = temp1, // r1 1626 r_array_length = temp2, // r2 1627 r_array_index = temp3, // r3 1628 r_bitmap = rscratch2; 1629 1630 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1631 1632 u1 bit = super_klass_slot; 1633 1634 // Make sure that result is nonzero if the TBZ below misses. 1635 mov(result, 1); 1636 1637 // We're going to need the bitmap in a vector reg and in a core reg, 1638 // so load both now. 1639 ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset())); 1640 if (bit != 0) { 1641 ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset())); 1642 } 1643 // First check the bitmap to see if super_klass might be present. If 1644 // the bit is zero, we are certain that super_klass is not one of 1645 // the secondary supers. 1646 tbz(r_bitmap, bit, L_fallthrough); 1647 1648 // Get the first array index that can contain super_klass into r_array_index. 1649 if (bit != 0) { 1650 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1651 cnt(vtemp, T8B, vtemp); 1652 addv(vtemp, T8B, vtemp); 1653 fmovd(r_array_index, vtemp); 1654 } else { 1655 mov(r_array_index, (u1)1); 1656 } 1657 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1658 1659 // We will consult the secondary-super array. 1660 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1661 1662 // The value i in r_array_index is >= 1, so even though r_array_base 1663 // points to the length, we don't need to adjust it to point to the 1664 // data. 1665 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1666 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1667 1668 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1669 eor(result, result, r_super_klass); 1670 cbz(result, L_fallthrough); // Found a match 1671 1672 // Is there another entry to check? Consult the bitmap. 1673 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1674 1675 // Linear probe. 1676 if (bit != 0) { 1677 ror(r_bitmap, r_bitmap, bit); 1678 } 1679 1680 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1681 // The next slot to be inspected, by the stub we're about to call, 1682 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1683 // have been checked. 
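  // Hand off to the out-of-line slow-path stub to continue the linear
  // probe; on return, result == 0 iff super_klass was found (see
  // lookup_secondary_supers_table_slow_path below).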
1684   Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1685   if (stub_is_near) {
1686     bl(stub);
1687   } else {
1688     address call = trampoline_call(stub);
1689     if (call == nullptr) {
1690       return false; // trampoline allocation failed
1691     }
1692   }
1693 
1694   BLOCK_COMMENT("} lookup_secondary_supers_table");
1695 
1696   bind(L_fallthrough);
1697 
1698   if (VerifySecondarySupers) {
1699     verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1700                                   temp1, temp2, result);      // r1, r2, r5
1701   }
1702   return true;
1703 }
1704 
1705 // Called by code generated by lookup_secondary_supers_table
1706 // above. This is called when there is a collision in the hashed
1707 // lookup in the secondary supers array.
1708 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1709                                                              Register r_array_base,
1710                                                              Register r_array_index,
1711                                                              Register r_bitmap,
1712                                                              Register temp1,
1713                                                              Register result) {
1714   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1715 
1716   const Register
1717     r_array_length = temp1,
1718     r_sub_klass    = noreg; // unused
1719 
1720   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1721 
1722   Label L_fallthrough, L_huge;
1723 
1724   // Load the array length.
1725   ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1726   // And adjust the array base to point to the data.
1727   // NB! Effectively increments current slot index by 1.
1728   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1729   add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1730 
1731   // The bitmap is full to bursting.
1732   // Implicit invariant: BITMAP_FULL implies (length > 0)
1733   assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1734   cmn(r_bitmap, (u1)1);
1735   br(EQ, L_huge);
1736 
1737   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1738   // current slot (at secondary_supers[r_array_index]) has not yet
1739   // been inspected, and r_array_index may be out of bounds if we
1740   // wrapped around the end of the array.
1741 
1742   { // This is conventional linear probing, but instead of terminating
1743     // when a null entry is found in the table, we maintain a bitmap
1744     // in which a 0 indicates missing entries.
1745     // The check above guarantees there are 0s in the bitmap, so the loop
1746     // eventually terminates.
1747     Label L_loop;
1748     bind(L_loop);
1749 
1750     // Check for wraparound.
1751     cmp(r_array_index, r_array_length);
1752     csel(r_array_index, zr, r_array_index, GE);
1753 
1754     ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1755     eor(result, rscratch1, r_super_klass);
1756     cbz(result, L_fallthrough);
1757 
1758     tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1759 
1760     ror(r_bitmap, r_bitmap, 1);
1761     add(r_array_index, r_array_index, 1);
1762     b(L_loop);
1763   }
1764 
1765   { // Degenerate case: more than 64 secondary supers.
1766     // FIXME: We could do something smarter here, maybe a vectorized
1767     // comparison or a binary search, but is that worth any added
1768     // complexity?
1769     bind(L_huge);
1770     cmp(sp, zr); // Clear Z flag; SP is never zero
1771     repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1772     cset(result, NE); // result == 0 iff we got a match.
1773   }
1774 
1775   bind(L_fallthrough);
1776 }
1777 
1778 // Make sure that the hashed lookup and a linear scan agree.
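// Only used when VerifySecondarySupers is enabled (see the call at the
// end of lookup_secondary_supers_table above).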
1779 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1780                                                    Register r_super_klass,
1781                                                    Register temp1,
1782                                                    Register temp2,
1783                                                    Register result) {
1784   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1785 
1786   const Register
1787     r_array_base   = temp1,
1788     r_array_length = temp2,
1789     r_array_index  = noreg, // unused
1790     r_bitmap       = noreg; // unused
1791 
1792   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1793 
1794   BLOCK_COMMENT("verify_secondary_supers_table {");
1795 
1796   // We will consult the secondary-super array.
1797   ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1798 
1799   // Load the array length.
1800   ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1801   // And adjust the array base to point to the data.
1802   add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1803 
1804   cmp(sp, zr); // Clear Z flag; SP is never zero
1805   // Scan R2 words at [R1] for an occurrence of R0.
1806   // Set NZ/Z based on last compare.
1807   repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
1808   // rscratch1 == 0 iff we got a match.
1809   cset(rscratch1, NE);
1810 
1811   Label passed;
1812   cmp(result, zr);
1813   cset(result, NE); // normalize result to 0/1 for comparison
1814 
1815   cmp(rscratch1, result);
1816   br(EQ, passed);
1817   {
1818     mov(r0, r_super_klass);         // r0 <- r0
1819     mov(r1, r_sub_klass);           // r1 <- r4
1820     mov(r2, /*expected*/rscratch1); // r2 <- r8
1821     mov(r3, result);                // r3 <- r5
1822     mov(r4, (address)("mismatch")); // r4 <- const
1823     rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
1824     should_not_reach_here();
1825   }
1826   bind(passed);
1827 
1828   BLOCK_COMMENT("} verify_secondary_supers_table");
1829 }
1830 
1831 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
1832   assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
1833   assert_different_registers(klass, rthread, scratch);
1834 
1835   Label L_fallthrough, L_tmp;
1836   if (L_fast_path == nullptr) {
1837     L_fast_path = &L_fallthrough;
1838   } else if (L_slow_path == nullptr) {
1839     L_slow_path = &L_fallthrough;
1840   }
1841   // Fast path check: class is fully initialized
1842   ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1843   subs(zr, scratch, InstanceKlass::fully_initialized);
1844   br(Assembler::EQ, *L_fast_path);
1845 
1846   // Fast path check: current thread is initializer thread
1847   ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1848   cmp(rthread, scratch);
1849 
1850   if (L_slow_path == &L_fallthrough) {
1851     br(Assembler::EQ, *L_fast_path);
1852     bind(*L_slow_path);
1853   } else if (L_fast_path == &L_fallthrough) {
1854     br(Assembler::NE, *L_slow_path);
1855     bind(*L_fast_path);
1856   } else {
1857     Unimplemented();
1858   }
1859 }
1860 
1861 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
1862   if (!VerifyOops) return;
1863 
1864   // Pass register number to verify_oop_subroutine
1865   const char* b = nullptr;
1866   {
1867     ResourceMark rm;
1868     stringStream ss;
1869     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
1870     b = code_string(ss.as_string());
1871   }
1872   BLOCK_COMMENT("verify_oop {");
1873 
1874   strip_return_address(); // This might happen within a stack frame.
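  // Preserve r0, rscratch1, rscratch2 and lr across the verify-oop
  // subroutine call; they are restored from the stack below.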
1875 protect_return_address(); 1876 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1877 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1878 1879 mov(r0, reg); 1880 movptr(rscratch1, (uintptr_t)(address)b); 1881 1882 // call indirectly to solve generation ordering problem 1883 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1884 ldr(rscratch2, Address(rscratch2)); 1885 blr(rscratch2); 1886 1887 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1888 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1889 authenticate_return_address(); 1890 1891 BLOCK_COMMENT("} verify_oop"); 1892 } 1893 1894 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1895 if (!VerifyOops) return; 1896 1897 const char* b = nullptr; 1898 { 1899 ResourceMark rm; 1900 stringStream ss; 1901 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1902 b = code_string(ss.as_string()); 1903 } 1904 BLOCK_COMMENT("verify_oop_addr {"); 1905 1906 strip_return_address(); // This might happen within a stack frame. 1907 protect_return_address(); 1908 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1909 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1910 1911 // addr may contain sp so we will have to adjust it based on the 1912 // pushes that we just did. 1913 if (addr.uses(sp)) { 1914 lea(r0, addr); 1915 ldr(r0, Address(r0, 4 * wordSize)); 1916 } else { 1917 ldr(r0, addr); 1918 } 1919 movptr(rscratch1, (uintptr_t)(address)b); 1920 1921 // call indirectly to solve generation ordering problem 1922 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1923 ldr(rscratch2, Address(rscratch2)); 1924 blr(rscratch2); 1925 1926 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1927 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1928 authenticate_return_address(); 1929 1930 BLOCK_COMMENT("} verify_oop_addr"); 1931 } 1932 1933 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1934 int extra_slot_offset) { 1935 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
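  // The returned Address selects the interpreter expression stack slot at
  // esp + arg_slot * stackElementSize (+ the extra_slot_offset displacement),
  // whether arg_slot is a constant or a register.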
1936 int stackElementSize = Interpreter::stackElementSize; 1937 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1938 #ifdef ASSERT 1939 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1940 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1941 #endif 1942 if (arg_slot.is_constant()) { 1943 return Address(esp, arg_slot.as_constant() * stackElementSize 1944 + offset); 1945 } else { 1946 add(rscratch1, esp, arg_slot.as_register(), 1947 ext::uxtx, exact_log2(stackElementSize)); 1948 return Address(rscratch1, offset); 1949 } 1950 } 1951 1952 void MacroAssembler::call_VM_leaf_base(address entry_point, 1953 int number_of_arguments, 1954 Label *retaddr) { 1955 Label not_preempted; 1956 1957 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1958 str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset())); 1959 1960 mov(rscratch1, entry_point); 1961 blr(rscratch1); 1962 if (retaddr) 1963 bind(*retaddr); 1964 1965 if (entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter) || 1966 entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj)) { 1967 ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset())); 1968 cbz(rscratch1, not_preempted); 1969 str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset())); 1970 br(rscratch1); 1971 } 1972 1973 bind(not_preempted); 1974 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1975 } 1976 1977 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1978 call_VM_leaf_base(entry_point, number_of_arguments); 1979 } 1980 1981 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1982 pass_arg0(this, arg_0); 1983 call_VM_leaf_base(entry_point, 1); 1984 } 1985 1986 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1987 assert_different_registers(arg_1, c_rarg0); 1988 pass_arg0(this, arg_0); 1989 pass_arg1(this, arg_1); 1990 call_VM_leaf_base(entry_point, 2); 1991 } 1992 1993 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 1994 Register arg_1, Register arg_2) { 1995 assert_different_registers(arg_1, c_rarg0); 1996 assert_different_registers(arg_2, c_rarg0, c_rarg1); 1997 pass_arg0(this, arg_0); 1998 pass_arg1(this, arg_1); 1999 pass_arg2(this, arg_2); 2000 call_VM_leaf_base(entry_point, 3); 2001 } 2002 2003 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2004 pass_arg0(this, arg_0); 2005 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2006 } 2007 2008 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2009 2010 assert_different_registers(arg_0, c_rarg1); 2011 pass_arg1(this, arg_1); 2012 pass_arg0(this, arg_0); 2013 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2014 } 2015 2016 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2017 assert_different_registers(arg_0, c_rarg1, c_rarg2); 2018 assert_different_registers(arg_1, c_rarg2); 2019 pass_arg2(this, arg_2); 2020 pass_arg1(this, arg_1); 2021 pass_arg0(this, arg_0); 2022 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2023 } 2024 2025 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2026 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2027 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2028 
assert_different_registers(arg_2, c_rarg3); 2029 pass_arg3(this, arg_3); 2030 pass_arg2(this, arg_2); 2031 pass_arg1(this, arg_1); 2032 pass_arg0(this, arg_0); 2033 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2034 } 2035 2036 void MacroAssembler::null_check(Register reg, int offset) { 2037 if (needs_explicit_null_check(offset)) { 2038 // provoke OS null exception if reg is null by 2039 // accessing M[reg] w/o changing any registers 2040 // NOTE: this is plenty to provoke a segv 2041 ldr(zr, Address(reg)); 2042 } else { 2043 // nothing to do, (later) access of M[reg + offset] 2044 // will provoke OS null exception if reg is null 2045 } 2046 } 2047 2048 // MacroAssembler protected routines needed to implement 2049 // public methods 2050 2051 void MacroAssembler::mov(Register r, Address dest) { 2052 code_section()->relocate(pc(), dest.rspec()); 2053 uint64_t imm64 = (uint64_t)dest.target(); 2054 movptr(r, imm64); 2055 } 2056 2057 // Move a constant pointer into r. In AArch64 mode the virtual 2058 // address space is 48 bits in size, so we only need three 2059 // instructions to create a patchable instruction sequence that can 2060 // reach anywhere. 2061 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2062 #ifndef PRODUCT 2063 { 2064 char buffer[64]; 2065 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2066 block_comment(buffer); 2067 } 2068 #endif 2069 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2070 movz(r, imm64 & 0xffff); 2071 imm64 >>= 16; 2072 movk(r, imm64 & 0xffff, 16); 2073 imm64 >>= 16; 2074 movk(r, imm64 & 0xffff, 32); 2075 } 2076 2077 // Macro to mov replicated immediate to vector register. 2078 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2079 // the upper 56/48/32 bits must be zeros for B/H/S type. 
2080 // Vd will get the following values for different arrangements in T 2081 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2082 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2083 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2084 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2085 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2086 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2087 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2088 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2089 // Clobbers rscratch1 2090 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2091 assert(T != T1Q, "unsupported"); 2092 if (T == T1D || T == T2D) { 2093 int imm = operand_valid_for_movi_immediate(imm64, T); 2094 if (-1 != imm) { 2095 movi(Vd, T, imm); 2096 } else { 2097 mov(rscratch1, imm64); 2098 dup(Vd, T, rscratch1); 2099 } 2100 return; 2101 } 2102 2103 #ifdef ASSERT 2104 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2105 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2106 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2107 #endif 2108 int shift = operand_valid_for_movi_immediate(imm64, T); 2109 uint32_t imm32 = imm64 & 0xffffffffULL; 2110 if (shift >= 0) { 2111 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2112 } else { 2113 movw(rscratch1, imm32); 2114 dup(Vd, T, rscratch1); 2115 } 2116 } 2117 2118 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2119 { 2120 #ifndef PRODUCT 2121 { 2122 char buffer[64]; 2123 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2124 block_comment(buffer); 2125 } 2126 #endif 2127 if (operand_valid_for_logical_immediate(false, imm64)) { 2128 orr(dst, zr, imm64); 2129 } else { 2130 // we can use a combination of MOVZ or MOVN with 2131 // MOVK to build up the constant 2132 uint64_t imm_h[4]; 2133 int zero_count = 0; 2134 int neg_count = 0; 2135 int i; 2136 for (i = 0; i < 4; i++) { 2137 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2138 if (imm_h[i] == 0) { 2139 zero_count++; 2140 } else if (imm_h[i] == 0xffffL) { 2141 neg_count++; 2142 } 2143 } 2144 if (zero_count == 4) { 2145 // one MOVZ will do 2146 movz(dst, 0); 2147 } else if (neg_count == 4) { 2148 // one MOVN will do 2149 movn(dst, 0); 2150 } else if (zero_count == 3) { 2151 for (i = 0; i < 4; i++) { 2152 if (imm_h[i] != 0L) { 2153 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2154 break; 2155 } 2156 } 2157 } else if (neg_count == 3) { 2158 // one MOVN will do 2159 for (int i = 0; i < 4; i++) { 2160 if (imm_h[i] != 0xffffL) { 2161 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2162 break; 2163 } 2164 } 2165 } else if (zero_count == 2) { 2166 // one MOVZ and one MOVK will do 2167 for (i = 0; i < 3; i++) { 2168 if (imm_h[i] != 0L) { 2169 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2170 i++; 2171 break; 2172 } 2173 } 2174 for (;i < 4; i++) { 2175 if (imm_h[i] != 0L) { 2176 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2177 } 2178 } 2179 } else if (neg_count == 2) { 2180 // one MOVN and one MOVK will do 2181 for (i = 0; i < 4; i++) { 2182 if (imm_h[i] != 0xffffL) { 2183 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2184 i++; 2185 break; 2186 } 2187 } 2188 for (;i < 4; i++) { 2189 if (imm_h[i] != 0xffffL) { 2190 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2191 } 2192 } 2193 } else if (zero_count == 1) { 2194 // one MOVZ and two MOVKs will do 2195 for (i = 
0; i < 4; i++) { 2196 if (imm_h[i] != 0L) { 2197 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2198 i++; 2199 break; 2200 } 2201 } 2202 for (;i < 4; i++) { 2203 if (imm_h[i] != 0x0L) { 2204 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2205 } 2206 } 2207 } else if (neg_count == 1) { 2208 // one MOVN and two MOVKs will do 2209 for (i = 0; i < 4; i++) { 2210 if (imm_h[i] != 0xffffL) { 2211 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2212 i++; 2213 break; 2214 } 2215 } 2216 for (;i < 4; i++) { 2217 if (imm_h[i] != 0xffffL) { 2218 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2219 } 2220 } 2221 } else { 2222 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2223 movz(dst, (uint32_t)imm_h[0], 0); 2224 for (i = 1; i < 4; i++) { 2225 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2226 } 2227 } 2228 } 2229 } 2230 2231 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2232 { 2233 #ifndef PRODUCT 2234 { 2235 char buffer[64]; 2236 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2237 block_comment(buffer); 2238 } 2239 #endif 2240 if (operand_valid_for_logical_immediate(true, imm32)) { 2241 orrw(dst, zr, imm32); 2242 } else { 2243 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2244 // constant 2245 uint32_t imm_h[2]; 2246 imm_h[0] = imm32 & 0xffff; 2247 imm_h[1] = ((imm32 >> 16) & 0xffff); 2248 if (imm_h[0] == 0) { 2249 movzw(dst, imm_h[1], 16); 2250 } else if (imm_h[0] == 0xffff) { 2251 movnw(dst, imm_h[1] ^ 0xffff, 16); 2252 } else if (imm_h[1] == 0) { 2253 movzw(dst, imm_h[0], 0); 2254 } else if (imm_h[1] == 0xffff) { 2255 movnw(dst, imm_h[0] ^ 0xffff, 0); 2256 } else { 2257 // use a MOVZ and MOVK (makes it easier to debug) 2258 movzw(dst, imm_h[0], 0); 2259 movkw(dst, imm_h[1], 16); 2260 } 2261 } 2262 } 2263 2264 // Form an address from base + offset in Rd. Rd may or may 2265 // not actually be used: you must use the Address that is returned. 2266 // It is up to you to ensure that the shift provided matches the size 2267 // of your data. 2268 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2269 if (Address::offset_ok_for_immed(byte_offset, shift)) 2270 // It fits; no need for any heroics 2271 return Address(base, byte_offset); 2272 2273 // Don't do anything clever with negative or misaligned offsets 2274 unsigned mask = (1 << shift) - 1; 2275 if (byte_offset < 0 || byte_offset & mask) { 2276 mov(Rd, byte_offset); 2277 add(Rd, base, Rd); 2278 return Address(Rd); 2279 } 2280 2281 // See if we can do this with two 12-bit offsets 2282 { 2283 uint64_t word_offset = byte_offset >> shift; 2284 uint64_t masked_offset = word_offset & 0xfff000; 2285 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2286 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2287 add(Rd, base, masked_offset << shift); 2288 word_offset -= masked_offset; 2289 return Address(Rd, word_offset << shift); 2290 } 2291 } 2292 2293 // Do it the hard way 2294 mov(Rd, byte_offset); 2295 add(Rd, base, Rd); 2296 return Address(Rd); 2297 } 2298 2299 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2300 bool want_remainder, Register scratch) 2301 { 2302 // Full implementation of Java idiv and irem. The function 2303 // returns the (pc) offset of the div instruction - may be needed 2304 // for implicit exceptions. 
2305   //
2306   // constraint : ra/rb =/= scratch
2307   //         normal case
2308   //
2309   // input : ra: dividend
2310   //         rb: divisor
2311   //
2312   // result: either
2313   //         quotient  (= ra idiv rb)
2314   //         remainder (= ra irem rb)
2315 
2316   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2317 
2318   int idivl_offset = offset();
2319   if (! want_remainder) {
2320     sdivw(result, ra, rb);
2321   } else {
2322     sdivw(scratch, ra, rb);
2323     Assembler::msubw(result, scratch, rb, ra);
2324   }
2325 
2326   return idivl_offset;
2327 }
2328 
2329 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2330                                     bool want_remainder, Register scratch)
2331 {
2332   // Full implementation of Java ldiv and lrem.  The function
2333   // returns the (pc) offset of the div instruction - may be needed
2334   // for implicit exceptions.
2335   //
2336   // constraint : ra/rb =/= scratch
2337   //         normal case
2338   //
2339   // input : ra: dividend
2340   //         rb: divisor
2341   //
2342   // result: either
2343   //         quotient  (= ra idiv rb)
2344   //         remainder (= ra irem rb)
2345 
2346   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2347 
2348   int idivq_offset = offset();
2349   if (! want_remainder) {
2350     sdiv(result, ra, rb);
2351   } else {
2352     sdiv(scratch, ra, rb);
2353     Assembler::msub(result, scratch, rb, ra);
2354   }
2355 
2356   return idivq_offset;
2357 }
2358 
2359 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2360   address prev = pc() - NativeMembar::instruction_size;
2361   address last = code()->last_insn();
2362   if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2363     NativeMembar *bar = NativeMembar_at(prev);
2364     if (AlwaysMergeDMB) {
2365       bar->set_kind(bar->get_kind() | order_constraint);
2366       BLOCK_COMMENT("merged membar(always)");
2367       return;
2368     }
2369     // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2370     // doing so would introduce a StoreLoad which the caller did not
2371     // intend
2372     if (bar->get_kind() == order_constraint
2373         || bar->get_kind() == AnyAny
2374         || order_constraint == AnyAny) {
2375       // We are merging two memory barrier instructions.  On AArch64 we
2376       // can do this simply by ORing them together.
2377       bar->set_kind(bar->get_kind() | order_constraint);
2378       BLOCK_COMMENT("merged membar");
2379       return;
2380     } else {
2381       // In a special case like "DMB ST; DMB LD; DMB ST" the last DMB can be skipped.
2382       // We need to check the last two instructions.
2383       address prev2 = prev - NativeMembar::instruction_size;
2384       if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2385         NativeMembar *bar2 = NativeMembar_at(prev2);
2386         assert(bar2->get_kind() == order_constraint, "it should be merged before");
2387         BLOCK_COMMENT("merged membar(elided)");
2388         return;
2389       }
2390     }
2391   }
2392   code()->set_last_insn(pc());
2393   dmb(Assembler::barrier(order_constraint));
2394 }
2395 
2396 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2397   if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2398     merge_ldst(rt, adr, size_in_bytes, is_store);
2399     code()->clear_last_insn();
2400     return true;
2401   } else {
2402     assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte load/store is supported.");
2403     const uint64_t mask = size_in_bytes - 1;
2404     if (adr.getMode() == Address::base_plus_offset &&
2405         (adr.offset() & mask) == 0) { // only supports base_plus_offset.
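      // Remember this instruction so that a following adjacent load/store
      // can attempt to merge with it into an ldp/stp.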
2406 code()->set_last_insn(pc()); 2407 } 2408 return false; 2409 } 2410 } 2411 2412 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2413 // We always try to merge two adjacent loads into one ldp. 2414 if (!try_merge_ldst(Rx, adr, 8, false)) { 2415 Assembler::ldr(Rx, adr); 2416 } 2417 } 2418 2419 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2420 // We always try to merge two adjacent loads into one ldp. 2421 if (!try_merge_ldst(Rw, adr, 4, false)) { 2422 Assembler::ldrw(Rw, adr); 2423 } 2424 } 2425 2426 void MacroAssembler::str(Register Rx, const Address &adr) { 2427 // We always try to merge two adjacent stores into one stp. 2428 if (!try_merge_ldst(Rx, adr, 8, true)) { 2429 Assembler::str(Rx, adr); 2430 } 2431 } 2432 2433 void MacroAssembler::strw(Register Rw, const Address &adr) { 2434 // We always try to merge two adjacent stores into one stp. 2435 if (!try_merge_ldst(Rw, adr, 4, true)) { 2436 Assembler::strw(Rw, adr); 2437 } 2438 } 2439 2440 // MacroAssembler routines found actually to be needed 2441 2442 void MacroAssembler::push(Register src) 2443 { 2444 str(src, Address(pre(esp, -1 * wordSize))); 2445 } 2446 2447 void MacroAssembler::pop(Register dst) 2448 { 2449 ldr(dst, Address(post(esp, 1 * wordSize))); 2450 } 2451 2452 // Note: load_unsigned_short used to be called load_unsigned_word. 2453 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2454 int off = offset(); 2455 ldrh(dst, src); 2456 return off; 2457 } 2458 2459 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2460 int off = offset(); 2461 ldrb(dst, src); 2462 return off; 2463 } 2464 2465 int MacroAssembler::load_signed_short(Register dst, Address src) { 2466 int off = offset(); 2467 ldrsh(dst, src); 2468 return off; 2469 } 2470 2471 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2472 int off = offset(); 2473 ldrsb(dst, src); 2474 return off; 2475 } 2476 2477 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2478 int off = offset(); 2479 ldrshw(dst, src); 2480 return off; 2481 } 2482 2483 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2484 int off = offset(); 2485 ldrsbw(dst, src); 2486 return off; 2487 } 2488 2489 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2490 switch (size_in_bytes) { 2491 case 8: ldr(dst, src); break; 2492 case 4: ldrw(dst, src); break; 2493 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2494 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2495 default: ShouldNotReachHere(); 2496 } 2497 } 2498 2499 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2500 switch (size_in_bytes) { 2501 case 8: str(src, dst); break; 2502 case 4: strw(src, dst); break; 2503 case 2: strh(src, dst); break; 2504 case 1: strb(src, dst); break; 2505 default: ShouldNotReachHere(); 2506 } 2507 } 2508 2509 void MacroAssembler::decrementw(Register reg, int value) 2510 { 2511 if (value < 0) { incrementw(reg, -value); return; } 2512 if (value == 0) { return; } 2513 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2514 /* else */ { 2515 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2516 movw(rscratch2, (unsigned)value); 2517 subw(reg, reg, rscratch2); 2518 } 2519 } 2520 2521 void MacroAssembler::decrement(Register reg, int value) 2522 { 2523 if (value < 0) { increment(reg, -value); return; } 2524 if (value == 0) { return; } 2525 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2526 /* else */ { 2527 assert(reg != rscratch2, "invalid dst for register decrement"); 2528 mov(rscratch2, (uint64_t)value); 2529 sub(reg, reg, rscratch2); 2530 } 2531 } 2532 2533 void MacroAssembler::decrementw(Address dst, int value) 2534 { 2535 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2536 if (dst.getMode() == Address::literal) { 2537 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2538 lea(rscratch2, dst); 2539 dst = Address(rscratch2); 2540 } 2541 ldrw(rscratch1, dst); 2542 decrementw(rscratch1, value); 2543 strw(rscratch1, dst); 2544 } 2545 2546 void MacroAssembler::decrement(Address dst, int value) 2547 { 2548 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2549 if (dst.getMode() == Address::literal) { 2550 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2551 lea(rscratch2, dst); 2552 dst = Address(rscratch2); 2553 } 2554 ldr(rscratch1, dst); 2555 decrement(rscratch1, value); 2556 str(rscratch1, dst); 2557 } 2558 2559 void MacroAssembler::incrementw(Register reg, int value) 2560 { 2561 if (value < 0) { decrementw(reg, -value); return; } 2562 if (value == 0) { return; } 2563 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2564 /* else */ { 2565 assert(reg != rscratch2, "invalid dst for register increment"); 2566 movw(rscratch2, (unsigned)value); 2567 addw(reg, reg, rscratch2); 2568 } 2569 } 2570 2571 void MacroAssembler::increment(Register reg, int value) 2572 { 2573 if (value < 0) { decrement(reg, -value); return; } 2574 if (value == 0) { return; } 2575 if (value < (1 << 12)) { add(reg, reg, value); return; } 2576 /* else */ { 2577 assert(reg != rscratch2, "invalid dst for register increment"); 2578 movw(rscratch2, (unsigned)value); 2579 add(reg, reg, rscratch2); 2580 } 2581 } 2582 2583 void MacroAssembler::incrementw(Address dst, int value) 2584 { 2585 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2586 if (dst.getMode() == Address::literal) { 2587 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2588 lea(rscratch2, dst); 2589 dst = Address(rscratch2); 2590 } 2591 ldrw(rscratch1, dst); 2592 incrementw(rscratch1, value); 2593 strw(rscratch1, dst); 2594 } 2595 2596 void MacroAssembler::increment(Address dst, int value) 2597 { 2598 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2599 if (dst.getMode() == Address::literal) { 2600 assert(abs(value) < (1 << 12), 
"invalid value and address mode combination"); 2601 lea(rscratch2, dst); 2602 dst = Address(rscratch2); 2603 } 2604 ldr(rscratch1, dst); 2605 increment(rscratch1, value); 2606 str(rscratch1, dst); 2607 } 2608 2609 // Push lots of registers in the bit set supplied. Don't push sp. 2610 // Return the number of words pushed 2611 int MacroAssembler::push(unsigned int bitset, Register stack) { 2612 int words_pushed = 0; 2613 2614 // Scan bitset to accumulate register pairs 2615 unsigned char regs[32]; 2616 int count = 0; 2617 for (int reg = 0; reg <= 30; reg++) { 2618 if (1 & bitset) 2619 regs[count++] = reg; 2620 bitset >>= 1; 2621 } 2622 regs[count++] = zr->raw_encoding(); 2623 count &= ~1; // Only push an even number of regs 2624 2625 if (count) { 2626 stp(as_Register(regs[0]), as_Register(regs[1]), 2627 Address(pre(stack, -count * wordSize))); 2628 words_pushed += 2; 2629 } 2630 for (int i = 2; i < count; i += 2) { 2631 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2632 Address(stack, i * wordSize)); 2633 words_pushed += 2; 2634 } 2635 2636 assert(words_pushed == count, "oops, pushed != count"); 2637 2638 return count; 2639 } 2640 2641 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2642 int words_pushed = 0; 2643 2644 // Scan bitset to accumulate register pairs 2645 unsigned char regs[32]; 2646 int count = 0; 2647 for (int reg = 0; reg <= 30; reg++) { 2648 if (1 & bitset) 2649 regs[count++] = reg; 2650 bitset >>= 1; 2651 } 2652 regs[count++] = zr->raw_encoding(); 2653 count &= ~1; 2654 2655 for (int i = 2; i < count; i += 2) { 2656 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2657 Address(stack, i * wordSize)); 2658 words_pushed += 2; 2659 } 2660 if (count) { 2661 ldp(as_Register(regs[0]), as_Register(regs[1]), 2662 Address(post(stack, count * wordSize))); 2663 words_pushed += 2; 2664 } 2665 2666 assert(words_pushed == count, "oops, pushed != count"); 2667 2668 return count; 2669 } 2670 2671 // Push lots of registers in the bit set supplied. Don't push sp. 
2672 // Return the number of dwords pushed 2673 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2674 int words_pushed = 0; 2675 bool use_sve = false; 2676 int sve_vector_size_in_bytes = 0; 2677 2678 #ifdef COMPILER2 2679 use_sve = Matcher::supports_scalable_vector(); 2680 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2681 #endif 2682 2683 // Scan bitset to accumulate register pairs 2684 unsigned char regs[32]; 2685 int count = 0; 2686 for (int reg = 0; reg <= 31; reg++) { 2687 if (1 & bitset) 2688 regs[count++] = reg; 2689 bitset >>= 1; 2690 } 2691 2692 if (count == 0) { 2693 return 0; 2694 } 2695 2696 if (mode == PushPopFull) { 2697 if (use_sve && sve_vector_size_in_bytes > 16) { 2698 mode = PushPopSVE; 2699 } else { 2700 mode = PushPopNeon; 2701 } 2702 } 2703 2704 #ifndef PRODUCT 2705 { 2706 char buffer[48]; 2707 if (mode == PushPopSVE) { 2708 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2709 } else if (mode == PushPopNeon) { 2710 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2711 } else { 2712 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2713 } 2714 block_comment(buffer); 2715 } 2716 #endif 2717 2718 if (mode == PushPopSVE) { 2719 sub(stack, stack, sve_vector_size_in_bytes * count); 2720 for (int i = 0; i < count; i++) { 2721 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2722 } 2723 return count * sve_vector_size_in_bytes / 8; 2724 } 2725 2726 if (mode == PushPopNeon) { 2727 if (count == 1) { 2728 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2729 return 2; 2730 } 2731 2732 bool odd = (count & 1) == 1; 2733 int push_slots = count + (odd ? 1 : 0); 2734 2735 // Always pushing full 128 bit registers. 2736 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2737 words_pushed += 2; 2738 2739 for (int i = 2; i + 1 < count; i += 2) { 2740 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2741 words_pushed += 2; 2742 } 2743 2744 if (odd) { 2745 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2746 words_pushed++; 2747 } 2748 2749 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2750 return count * 2; 2751 } 2752 2753 if (mode == PushPopFp) { 2754 bool odd = (count & 1) == 1; 2755 int push_slots = count + (odd ? 
1 : 0); 2756 2757 if (count == 1) { 2758 // Stack pointer must be 16 bytes aligned 2759 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2760 return 1; 2761 } 2762 2763 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2764 words_pushed += 2; 2765 2766 for (int i = 2; i + 1 < count; i += 2) { 2767 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2768 words_pushed += 2; 2769 } 2770 2771 if (odd) { 2772 // Stack pointer must be 16 bytes aligned 2773 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2774 words_pushed++; 2775 } 2776 2777 assert(words_pushed == count, "oops, pushed != count"); 2778 2779 return count; 2780 } 2781 2782 return 0; 2783 } 2784 2785 // Return the number of dwords popped 2786 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2787 int words_pushed = 0; 2788 bool use_sve = false; 2789 int sve_vector_size_in_bytes = 0; 2790 2791 #ifdef COMPILER2 2792 use_sve = Matcher::supports_scalable_vector(); 2793 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2794 #endif 2795 // Scan bitset to accumulate register pairs 2796 unsigned char regs[32]; 2797 int count = 0; 2798 for (int reg = 0; reg <= 31; reg++) { 2799 if (1 & bitset) 2800 regs[count++] = reg; 2801 bitset >>= 1; 2802 } 2803 2804 if (count == 0) { 2805 return 0; 2806 } 2807 2808 if (mode == PushPopFull) { 2809 if (use_sve && sve_vector_size_in_bytes > 16) { 2810 mode = PushPopSVE; 2811 } else { 2812 mode = PushPopNeon; 2813 } 2814 } 2815 2816 #ifndef PRODUCT 2817 { 2818 char buffer[48]; 2819 if (mode == PushPopSVE) { 2820 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 2821 } else if (mode == PushPopNeon) { 2822 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 2823 } else { 2824 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 2825 } 2826 block_comment(buffer); 2827 } 2828 #endif 2829 2830 if (mode == PushPopSVE) { 2831 for (int i = count - 1; i >= 0; i--) { 2832 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2833 } 2834 add(stack, stack, sve_vector_size_in_bytes * count); 2835 return count * sve_vector_size_in_bytes / 8; 2836 } 2837 2838 if (mode == PushPopNeon) { 2839 if (count == 1) { 2840 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2841 return 2; 2842 } 2843 2844 bool odd = (count & 1) == 1; 2845 int push_slots = count + (odd ? 1 : 0); 2846 2847 if (odd) { 2848 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2849 words_pushed++; 2850 } 2851 2852 for (int i = 2; i + 1 < count; i += 2) { 2853 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2854 words_pushed += 2; 2855 } 2856 2857 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2858 words_pushed += 2; 2859 2860 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2861 2862 return count * 2; 2863 } 2864 2865 if (mode == PushPopFp) { 2866 bool odd = (count & 1) == 1; 2867 int push_slots = count + (odd ? 
1 : 0); 2868 2869 if (count == 1) { 2870 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 2871 return 1; 2872 } 2873 2874 if (odd) { 2875 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2876 words_pushed++; 2877 } 2878 2879 for (int i = 2; i + 1 < count; i += 2) { 2880 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2881 words_pushed += 2; 2882 } 2883 2884 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 2885 words_pushed += 2; 2886 2887 assert(words_pushed == count, "oops, pushed != count"); 2888 2889 return count; 2890 } 2891 2892 return 0; 2893 } 2894 2895 // Return the number of dwords pushed 2896 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2897 bool use_sve = false; 2898 int sve_predicate_size_in_slots = 0; 2899 2900 #ifdef COMPILER2 2901 use_sve = Matcher::supports_scalable_vector(); 2902 if (use_sve) { 2903 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2904 } 2905 #endif 2906 2907 if (!use_sve) { 2908 return 0; 2909 } 2910 2911 unsigned char regs[PRegister::number_of_registers]; 2912 int count = 0; 2913 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2914 if (1 & bitset) 2915 regs[count++] = reg; 2916 bitset >>= 1; 2917 } 2918 2919 if (count == 0) { 2920 return 0; 2921 } 2922 2923 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2924 VMRegImpl::stack_slot_size * count, 16); 2925 sub(stack, stack, total_push_bytes); 2926 for (int i = 0; i < count; i++) { 2927 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2928 } 2929 return total_push_bytes / 8; 2930 } 2931 2932 // Return the number of dwords popped 2933 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2934 bool use_sve = false; 2935 int sve_predicate_size_in_slots = 0; 2936 2937 #ifdef COMPILER2 2938 use_sve = Matcher::supports_scalable_vector(); 2939 if (use_sve) { 2940 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2941 } 2942 #endif 2943 2944 if (!use_sve) { 2945 return 0; 2946 } 2947 2948 unsigned char regs[PRegister::number_of_registers]; 2949 int count = 0; 2950 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2951 if (1 & bitset) 2952 regs[count++] = reg; 2953 bitset >>= 1; 2954 } 2955 2956 if (count == 0) { 2957 return 0; 2958 } 2959 2960 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2961 VMRegImpl::stack_slot_size * count, 16); 2962 for (int i = count - 1; i >= 0; i--) { 2963 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2964 } 2965 add(stack, stack, total_pop_bytes); 2966 return total_pop_bytes / 8; 2967 } 2968 2969 #ifdef ASSERT 2970 void MacroAssembler::verify_heapbase(const char* msg) { 2971 #if 0 2972 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2973 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2974 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2975 // rheapbase is allocated as general register 2976 return; 2977 } 2978 if (CheckCompressedOops) { 2979 Label ok; 2980 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2981 cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2982 br(Assembler::EQ, ok); 2983 stop(msg); 2984 bind(ok); 2985 pop(1 << rscratch1->encoding(), sp); 2986 } 2987 #endif 2988 } 2989 #endif 2990 2991 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register 
tmp2) { 2992 assert_different_registers(value, tmp1, tmp2); 2993 Label done, tagged, weak_tagged; 2994 2995 cbz(value, done); // Use null as-is. 2996 tst(value, JNIHandles::tag_mask); // Test for tag. 2997 br(Assembler::NE, tagged); 2998 2999 // Resolve local handle 3000 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3001 verify_oop(value); 3002 b(done); 3003 3004 bind(tagged); 3005 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3006 tbnz(value, 0, weak_tagged); // Test for weak tag. 3007 3008 // Resolve global handle 3009 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3010 verify_oop(value); 3011 b(done); 3012 3013 bind(weak_tagged); 3014 // Resolve jweak. 3015 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3016 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3017 verify_oop(value); 3018 3019 bind(done); 3020 } 3021 3022 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3023 assert_different_registers(value, tmp1, tmp2); 3024 Label done; 3025 3026 cbz(value, done); // Use null as-is. 3027 3028 #ifdef ASSERT 3029 { 3030 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3031 Label valid_global_tag; 3032 tbnz(value, 1, valid_global_tag); // Test for global tag 3033 stop("non global jobject using resolve_global_jobject"); 3034 bind(valid_global_tag); 3035 } 3036 #endif 3037 3038 // Resolve global handle 3039 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3040 verify_oop(value); 3041 3042 bind(done); 3043 } 3044 3045 void MacroAssembler::stop(const char* msg) { 3046 BLOCK_COMMENT(msg); 3047 dcps1(0xdeae); 3048 emit_int64((uintptr_t)msg); 3049 } 3050 3051 void MacroAssembler::unimplemented(const char* what) { 3052 const char* buf = nullptr; 3053 { 3054 ResourceMark rm; 3055 stringStream ss; 3056 ss.print("unimplemented: %s", what); 3057 buf = code_string(ss.as_string()); 3058 } 3059 stop(buf); 3060 } 3061 3062 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3063 #ifdef ASSERT 3064 Label OK; 3065 br(cc, OK); 3066 stop(msg); 3067 bind(OK); 3068 #endif 3069 } 3070 3071 // If a constant does not fit in an immediate field, generate some 3072 // number of MOV instructions and then perform the operation. 3073 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3074 add_sub_imm_insn insn1, 3075 add_sub_reg_insn insn2, 3076 bool is32) { 3077 assert(Rd != zr, "Rd = zr and not setting flags?"); 3078 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3079 if (fits) { 3080 (this->*insn1)(Rd, Rn, imm); 3081 } else { 3082 if (uabs(imm) < (1 << 24)) { 3083 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3084 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3085 } else { 3086 assert_different_registers(Rd, Rn); 3087 mov(Rd, imm); 3088 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3089 } 3090 } 3091 } 3092 3093 // Separate vsn which sets the flags. Optimisations are more restricted 3094 // because we must set the flags correctly. 3095 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3096 add_sub_imm_insn insn1, 3097 add_sub_reg_insn insn2, 3098 bool is32) { 3099 bool fits = operand_valid_for_add_sub_immediate(is32 ? 
(int32_t)imm : imm); 3100 if (fits) { 3101 (this->*insn1)(Rd, Rn, imm); 3102 } else { 3103 assert_different_registers(Rd, Rn); 3104 assert(Rd != zr, "overflow in immediate operand"); 3105 mov(Rd, imm); 3106 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3107 } 3108 } 3109 3110 3111 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3112 if (increment.is_register()) { 3113 add(Rd, Rn, increment.as_register()); 3114 } else { 3115 add(Rd, Rn, increment.as_constant()); 3116 } 3117 } 3118 3119 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3120 if (increment.is_register()) { 3121 addw(Rd, Rn, increment.as_register()); 3122 } else { 3123 addw(Rd, Rn, increment.as_constant()); 3124 } 3125 } 3126 3127 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3128 if (decrement.is_register()) { 3129 sub(Rd, Rn, decrement.as_register()); 3130 } else { 3131 sub(Rd, Rn, decrement.as_constant()); 3132 } 3133 } 3134 3135 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3136 if (decrement.is_register()) { 3137 subw(Rd, Rn, decrement.as_register()); 3138 } else { 3139 subw(Rd, Rn, decrement.as_constant()); 3140 } 3141 } 3142 3143 void MacroAssembler::reinit_heapbase() 3144 { 3145 if (UseCompressedOops) { 3146 if (Universe::is_fully_initialized()) { 3147 mov(rheapbase, CompressedOops::ptrs_base()); 3148 } else { 3149 lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 3150 ldr(rheapbase, Address(rheapbase)); 3151 } 3152 } 3153 } 3154 3155 // this simulates the behaviour of the x86 cmpxchg instruction using a 3156 // load linked/store conditional pair. we use the acquire/release 3157 // versions of these instructions so that we flush pending writes as 3158 // per Java semantics. 3159 3160 // n.b the x86 version assumes the old value to be compared against is 3161 // in rax and updates rax with the value located in memory if the 3162 // cmpxchg fails. we supply a register for the old value explicitly 3163 3164 // the aarch64 load linked/store conditional instructions do not 3165 // accept an offset. so, unlike x86, we must provide a plain register 3166 // to identify the memory word to be compared/exchanged rather than a 3167 // register+offset Address. 3168 3169 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3170 Label &succeed, Label *fail) { 3171 // oldv holds comparison value 3172 // newv holds value to write in exchange 3173 // addr identifies memory word to compare against/update 3174 if (UseLSE) { 3175 mov(tmp, oldv); 3176 casal(Assembler::xword, oldv, newv, addr); 3177 cmp(tmp, oldv); 3178 br(Assembler::EQ, succeed); 3179 membar(AnyAny); 3180 } else { 3181 Label retry_load, nope; 3182 prfm(Address(addr), PSTL1STRM); 3183 bind(retry_load); 3184 // flush and load exclusive from the memory location 3185 // and fail if it is not what we expect 3186 ldaxr(tmp, addr); 3187 cmp(tmp, oldv); 3188 br(Assembler::NE, nope); 3189 // if we store+flush with no intervening write tmp will be zero 3190 stlxr(tmp, newv, addr); 3191 cbzw(tmp, succeed); 3192 // retry so we only ever return after a load fails to compare 3193 // ensures we don't return a stale value after a failed write. 
3194     b(retry_load);
3195     // if the memory word differs we return it in oldv and signal a fail
3196     bind(nope);
3197     membar(AnyAny);
3198     mov(oldv, tmp);
3199   }
3200   if (fail)
3201     b(*fail);
3202 }
3203 
3204 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
3205                                         Label &succeed, Label *fail) {
3206   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
3207   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
3208 }
3209 
3210 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
3211                               Label &succeed, Label *fail) {
3212   // oldv holds comparison value
3213   // newv holds value to write in exchange
3214   // addr identifies memory word to compare against/update
3215   // tmp returns 0/1 for success/failure
3216   if (UseLSE) {
3217     mov(tmp, oldv);
3218     casal(Assembler::word, oldv, newv, addr);
3219     cmp(tmp, oldv);
3220     br(Assembler::EQ, succeed);
3221     membar(AnyAny);
3222   } else {
3223     Label retry_load, nope;
3224     prfm(Address(addr), PSTL1STRM);
3225     bind(retry_load);
3226     // flush and load exclusive from the memory location
3227     // and fail if it is not what we expect
3228     ldaxrw(tmp, addr);
3229     cmp(tmp, oldv);
3230     br(Assembler::NE, nope);
3231     // if we store+flush with no intervening write tmp will be zero
3232     stlxrw(tmp, newv, addr);
3233     cbzw(tmp, succeed);
3234     // retry so we only ever return after a load fails to compare
3235     // ensures we don't return a stale value after a failed write.
3236     b(retry_load);
3237     // if the memory word differs we return it in oldv and signal a fail
3238     bind(nope);
3239     membar(AnyAny);
3240     mov(oldv, tmp);
3241   }
3242   if (fail)
3243     b(*fail);
3244 }
3245 
3246 // A generic CAS; success or failure is in the EQ flag.  A weak CAS
3247 // doesn't retry and may fail spuriously.  If the oldval is wanted,
3248 // pass a register for the result; otherwise pass noreg.
3249 
3250 // Clobbers rscratch1
3251 void MacroAssembler::cmpxchg(Register addr, Register expected,
3252                              Register new_val,
3253                              enum operand_size size,
3254                              bool acquire, bool release,
3255                              bool weak,
3256                              Register result) {
3257   if (result == noreg)  result = rscratch1;
3258   BLOCK_COMMENT("cmpxchg {");
3259   if (UseLSE) {
3260     mov(result, expected);
3261     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3262     compare_eq(result, expected, size);
3263 #ifdef ASSERT
3264     // Poison rscratch1 which is written on !UseLSE branch
3265     mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3266 #endif
3267   } else {
3268     Label retry_load, done;
3269     prfm(Address(addr), PSTL1STRM);
3270     bind(retry_load);
3271     load_exclusive(result, addr, size, acquire);
3272     compare_eq(result, expected, size);
3273     br(Assembler::NE, done);
3274     store_exclusive(rscratch1, new_val, addr, size, release);
3275     if (weak) {
3276       cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3277     } else {
3278       cbnzw(rscratch1, retry_load);
3279     }
3280     bind(done);
3281   }
3282   BLOCK_COMMENT("} cmpxchg");
3283 }
3284 
3285 // A generic comparison. Only compares for equality, clobbers rscratch1.
// A generic comparison. Only compares for equality, clobbers rscratch1.
void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
  if (size == xword) {
    cmp(rm, rn);
  } else if (size == word) {
    cmpw(rm, rn);
  } else if (size == halfword) {
    eorw(rscratch1, rm, rn);
    ands(zr, rscratch1, 0xffff);
  } else if (size == byte) {
    eorw(rscratch1, rm, rn);
    ands(zr, rscratch1, 0xff);
  } else {
    ShouldNotReachHere();
  }
}


static bool different(Register a, RegisterOrConstant b, Register c) {
  if (b.is_constant())
    return a != c;
  else
    return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    if (incr.is_register()) {                                           \
      AOP(sz, incr.as_register(), prev, addr);                          \
    } else {                                                            \
      mov(rscratch2, incr.as_constant());                               \
      AOP(sz, rscratch2, prev, addr);                                   \
    }                                                                   \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  prfm(Address(addr), PSTL1STRM);                                       \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  OP(rscratch1, result, incr);                                          \
  STXR(rscratch2, rscratch1, addr);                                     \
  cbnzw(rscratch2, retry_load);                                         \
  if (prev->is_valid() && prev != result) {                             \
    IOP(prev, rscratch1, incr);                                         \
  }                                                                     \
}

ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)

#undef ATOMIC_OP

#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    AOP(sz, newv, prev, addr);                                          \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  prfm(Address(addr), PSTL1STRM);                                       \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  STXR(rscratch1, newv, addr);                                          \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)

#undef ATOMIC_XCHG
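// For reference, a hand-expanded sketch of one instantiation above
// (approximately what the preprocessor generates for
// ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)):
//
//   void MacroAssembler::atomic_add(Register prev, RegisterOrConstant incr,
//                                   Register addr) {
//     if (UseLSE) { /* single ldadd; old value lands in prev */ return; }
//     Register result = /* prev if distinct from incr/addr, else rscratch2 */;
//     Label retry_load;
//     prfm(Address(addr), PSTL1STRM);
//     bind(retry_load);
//     ldxr(result, addr);                 // load-exclusive old value
//     add(rscratch1, result, incr);       // OP: compute new value
//     stxr(rscratch2, rscratch1, addr);   // store-exclusive new value
//     cbnzw(rscratch2, retry_load);       // retry if reservation was lost
//     if (prev->is_valid() && prev != result)
//       sub(prev, rscratch1, incr);       // IOP: recover the old value
//   }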
#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr(" pc = 0x%016" PRIx64, pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
      tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
      tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
      tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
      tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
      tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
      tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
      tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
      tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
      tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
      tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
      tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
      tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
      tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
      tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
      tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
      tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
      tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
      tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
      tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
      tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
      tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
      tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
      tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
      tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
      tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
      tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
      tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
      tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
      tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
      tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
#ifndef R18_RESERVED
  regs += r18_tls;
#endif
  return regs;
}

void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
  int step = 4 * wordSize;
  push(call_clobbered_gp_registers() - exclude, sp);
  sub(sp, sp, step);
  mov(rscratch1, -step);
  // Push v0-v7, v16-v31.
  for (int i = 31; i >= 4; i -= 4) {
    if (i <= v7->encoding() || i >= v16->encoding())
      st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
          as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
  }
  st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
      as_FloatRegister(3), T1D, Address(sp));
}

void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
  for (int i = 0; i < 32; i += 4) {
    if (i <= v7->encoding() || i >= v16->encoding())
      ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
  }

  reinitialize_ptrue();

  pop(call_clobbered_gp_registers() - exclude, sp);
}

void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
                                    int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
  push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
  if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
    sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
    for (int i = 0; i < FloatRegister::number_of_registers; i++) {
      sve_str(as_FloatRegister(i), Address(sp, i));
    }
  } else {
    int step = (save_vectors ? 8 : 4) * wordSize;
    mov(rscratch1, -step);
    sub(sp, sp, step);
    for (int i = 28; i >= 4; i -= 4) {
      st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
    }
    st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
  }
  if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
    sub(sp, sp, total_predicate_in_bytes);
    for (int i = 0; i < PRegister::number_of_registers; i++) {
      sve_str(as_PRegister(i), Address(sp, i));
    }
  }
}

void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
                                   int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
  if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
    for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
      sve_ldr(as_PRegister(i), Address(sp, i));
    }
    add(sp, sp, total_predicate_in_bytes);
  }
  if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
    for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
      sve_ldr(as_FloatRegister(i), Address(sp, i));
    }
    add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
  } else {
    int step = (restore_vectors ? 8 : 4) * wordSize;
    for (int i = 0; i <= 28; i += 4)
      ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
  }

  // We may use predicate registers and rely on ptrue with SVE,
  // regardless of wide vector (> 8 bytes) used or not.
  if (use_sve) {
    reinitialize_ptrue();
  }

  // integer registers except lr & sp
  pop(RegSet::range(r0, r17), sp);
#ifdef R18_RESERVED
  ldp(zr, r19, Address(post(sp, 2 * wordSize)));
  pop(RegSet::range(r20, r29), sp);
#else
  pop(RegSet::range(r18_tls, r29), sp);
#endif
}
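// Illustrative layout (a sketch, not normative): after the non-SVE path
// of push_CPU_state above, with step = (save_vectors ? 8 : 4) * wordSize,
// the save area looks like
//
//   sp + 0*step : v0..v3
//   sp + 1*step : v4..v7
//   ...
//   sp + 7*step : v28..v31
//   above that  : r0..r29, pushed first
//
// pop_CPU_state walks the same layout back up with post-indexed ld1.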
/**
 * Helpers for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset). "size" is the
// size of the operand. The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1u<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}

Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
  assert(offset >= 0, "spill to negative address?");

  Register base = sp;

  // An immediate offset in the range 0 to 255 which is multiplied
  // by the current vector or predicate register size in bytes.
  if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
    return Address(base, offset / sve_reg_size_in_bytes);
  }

  add(tmp, base, offset);
  return Address(tmp);
}

// Checks whether the offset is aligned.
// Returns true if it is, else false.
bool MacroAssembler::merge_alignment_check(Register base,
                                           size_t size,
                                           int64_t cur_offset,
                                           int64_t prev_offset) const {
  if (AvoidUnalignedAccesses) {
    if (base == sp) {
      // Checks whether the lower offset is aligned to a pair of registers.
      int64_t pair_mask = size * 2 - 1;
      int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
      return (offset & pair_mask) == 0;
    } else { // If base is not sp, we can't guarantee the access is aligned.
      return false;
    }
  } else {
    int64_t mask = size - 1;
    // Load/store pair instructions only support element-size-aligned offsets.
    return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
  }
}
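// An illustrative merge (assumed input, not taken from real output):
// two adjacent same-size loads off the same base, e.g.
//
//   ldr x2, [sp, #16]
//   ldr x3, [sp, #24]
//
// pass ldst_can_merge() below, and merge_ldst() then rewinds the code
// section over the first instruction and emits the single pair
//
//   ldp x2, x3, [sp, #16]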
// Checks whether the current and previous loads/stores can be merged.
// Returns true if they can, else false.
bool MacroAssembler::ldst_can_merge(Register rt,
                                    const Address &adr,
                                    size_t cur_size_in_bytes,
                                    bool is_store) const {
  address prev = pc() - NativeInstruction::instruction_size;
  address last = code()->last_insn();

  if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
    return false;
  }

  if (adr.getMode() != Address::base_plus_offset || prev != last) {
    return false;
  }

  NativeLdSt* prev_ldst = NativeLdSt_at(prev);
  size_t prev_size_in_bytes = prev_ldst->size_in_bytes();

  assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
  assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");

  if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
    return false;
  }

  int64_t max_offset = 63 * prev_size_in_bytes;
  int64_t min_offset = -64 * prev_size_in_bytes;

  assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");

  // Only accesses with the same base can be merged.
  if (adr.base() != prev_ldst->base()) {
    return false;
  }

  int64_t cur_offset = adr.offset();
  int64_t prev_offset = prev_ldst->offset();
  size_t diff = abs(cur_offset - prev_offset);
  if (diff != prev_size_in_bytes) {
    return false;
  }

  // The following cases cannot be merged:
  //   ldr x2, [x2, #8]
  //   ldr x3, [x2, #16]
  // or:
  //   ldr x2, [x3, #8]
  //   ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get a SIGILL.
  if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
    return false;
  }

  int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
  // The offset must be within the ldp/stp instruction's range.
  if (low_offset > max_offset || low_offset < min_offset) {
    return false;
  }

  if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
    return true;
  }

  return false;
}

// Merge current load/store with previous load/store into ldp/stp.
void MacroAssembler::merge_ldst(Register rt,
                                const Address &adr,
                                size_t cur_size_in_bytes,
                                bool is_store) {

  assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");

  Register rt_low, rt_high;
  address prev = pc() - NativeInstruction::instruction_size;
  NativeLdSt* prev_ldst = NativeLdSt_at(prev);

  int64_t offset;

  if (adr.offset() < prev_ldst->offset()) {
    offset = adr.offset();
    rt_low = rt;
    rt_high = prev_ldst->target();
  } else {
    offset = prev_ldst->offset();
    rt_low = prev_ldst->target();
    rt_high = rt;
  }

  Address adr_p = Address(prev_ldst->base(), offset);
  // Overwrite the previously generated binary.
  code_section()->set_end(prev);

  const size_t sz = prev_ldst->size_in_bytes();
  assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
  if (!is_store) {
    BLOCK_COMMENT("merged ldr pair");
    if (sz == 8) {
      ldp(rt_low, rt_high, adr_p);
    } else {
      ldpw(rt_low, rt_high, adr_p);
    }
  } else {
    BLOCK_COMMENT("merged str pair");
    if (sz == 8) {
      stp(rt_low, rt_high, adr_p);
    } else {
      stpw(rt_low, rt_high, adr_p);
    }
  }
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions. Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
                                             Register carry, Register carry2,
                                             Register idx, Register jdx,
                                             Register yz_idx1, Register yz_idx2,
                                             Register tmp, Register tmp3, Register tmp4,
                                             Register tmp6, Register product_hi) {

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx]   * product_hi) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  lsrw(jdx, idx, 2);

  bind(L_third_loop);

  subsw(jdx, jdx, 1);
  br(Assembler::MI, L_third_loop_exit);
  subw(idx, idx, 4);

  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));

  ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));

  lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));

  ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  ror(yz_idx2, yz_idx2, 32);

  ldp(rscratch2, rscratch1, Address(tmp6, 0));

  mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
  umulh(tmp4, product_hi, yz_idx1);

  ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
  ror(rscratch2, rscratch2, 32);

  mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
  umulh(carry2, product_hi, yz_idx2);

  // propagate sum of both multiplications into carry:tmp4:tmp3
  adds(tmp3, tmp3, carry);
  adc(tmp4, tmp4, zr);
  adds(tmp3, tmp3, rscratch1);
  adcs(tmp4, tmp4, tmp);
  adc(carry, carry2, zr);
  adds(tmp4, tmp4, rscratch2);
  adc(carry, carry, zr);

  ror(tmp3, tmp3, 32); // convert little-endian to big-endian
  ror(tmp4, tmp4, 32);
  stp(tmp4, tmp3, Address(tmp6, 0));

  b(L_third_loop);
  bind(L_third_loop_exit);

  andw(idx, idx, 0x3);
  cbz(idx, L_post_third_loop_done);

  Label L_check_1;
  subsw(idx, idx, 2);
  br(Assembler::MI, L_check_1);

  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(yz_idx1, Address(rscratch1, 0));
  ror(yz_idx1, yz_idx1, 32);
  mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
  umulh(tmp4, product_hi, yz_idx1);
  lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
  ldr(yz_idx2, Address(rscratch1, 0));
  ror(yz_idx2, yz_idx2, 32);

  add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);

  ror(tmp3, tmp3, 32);
  str(tmp3, Address(rscratch1, 0));

  bind(L_check_1);

  andw(idx, idx, 0x1);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_post_third_loop_done);
  ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
  umulh(carry2, tmp4, product_hi);
  ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));

  add2_with_carry(carry2, tmp3, tmp4, carry);

  strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}
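// Illustrative C equivalent of the mul/umulh/adds/adc pattern used in
// the loops above (a sketch, assuming unsigned __int128 support):
//
//   unsigned __int128 p = (unsigned __int128)x_word * y_word + carry;
//   lo    = (uint64_t)p;          // mul   + adds
//   carry = (uint64_t)(p >> 64);  // umulh + adc
//
// There is no integer multiply-accumulate with carry-out on AArch64,
// hence the separate high/low multiplies and the explicit carry chain.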
/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: tmp0
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);       // idx = ylen;
  addw(kdx, xlen, ylen); // kdx = xlen+ylen;
  mov(carry, zr);        // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi

  const Register jdx = tmp1;

  bind(L_second_loop);
  mov(carry, zr);  // carry = 0;
  movw(jdx, ylen); // j = ystart+1

  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_done);

  str(z, Address(pre(sp, -4 * wordSize)));

  Label L_last_x;
  lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_last_x);

  lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
  ldr(product_hi, Address(rscratch1));
  ror(product_hi, product_hi, 32); // convert big-endian to little-endian

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  str(ylen, Address(sp, wordSize));
  stp(x, xstart, Address(sp, 2 * wordSize));
  multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
                          tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
  ldp(z, ylen, Address(post(sp, 2 * wordSize)));
  ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen

  addw(tmp3, xlen, 1);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  subsw(tmp3, tmp3, 1);
  br(Assembler::MI, L_done);

  lsr(carry, carry, 32);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  b(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  ldrw(product_hi, Address(x, 0));
  b(L_third_loop_prologue);

  bind(L_done);
}

// Code for BigInteger::mulAdd intrinsic
// out     = r0
// in      = r1
// offset  = r2  (already out.length-offset)
// len     = r3
// k       = r4
//
// pseudo code from java implementation:
// carry = 0;
// offset = out.length-offset - 1;
// for (int j=len-1; j >= 0; j--) {
//   product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
//   out[offset--] = (int)product;
//   carry = product >>> 32;
// }
// return (int)carry;
void MacroAssembler::mul_add(Register out, Register in, Register offset,
                             Register len, Register k) {
  Label LOOP, END;
  // pre-loop
  cmp(len, zr); // cmp, not cbz/cbnz: use the condition twice => fewer branches
  csel(out, zr, out, Assembler::EQ);
  br(Assembler::EQ, END);
  add(in, in, len, LSL, 2);         // in[j+1] address
  add(offset, out, offset, LSL, 2); // out[offset + 1] address
  mov(out, zr); // used to keep carry now
  BIND(LOOP);
  ldrw(rscratch1, Address(pre(in, -4)));
  madd(rscratch1, rscratch1, k, out);
  ldrw(rscratch2, Address(pre(offset, -4)));
  add(rscratch1, rscratch1, rscratch2);
  strw(rscratch1, Address(offset));
  lsr(out, rscratch1, 32);
  subs(len, len, 1);
  br(Assembler::NE, LOOP);
  BIND(END);
}

/**
 * Emits code to update CRC-32 with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 *   val = crc_table[(val ^ crc) & 0xFF];
 *   crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  eor(val, val, crc);
  andr(val, val, 0xff);
  ldrw(val, Address(table, val, Address::lsl(2)));
  eor(crc, val, crc, Assembler::LSR, 8);
}

/**
 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]v         Register containing the 32-bit value to fold into the CRC.
 * @param [in]table0    Register containing table 0 of crc constants.
 * @param [in]table1    Register containing table 1 of crc constants.
 * @param [in]table2    Register containing table 2 of crc constants.
 * @param [in]table3    Register containing table 3 of crc constants.
 *
 * uint32_t crc;
 *   v = crc ^ v
 *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
 */
void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
                                       Register table0, Register table1, Register table2, Register table3,
                                       bool upper) {
  eor(v, crc, v, upper ? LSR : LSL, upper ? 32 : 0);
  uxtb(tmp, v);
  ldrw(crc, Address(table3, tmp, Address::lsl(2)));
  ubfx(tmp, v, 8, 8);
  ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
  ubfx(tmp, v, 16, 8);
  ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
  ubfx(tmp, v, 24, 8);
  ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
}
void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
  Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
  assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

  subs(tmp0, len, 384);
  mvnw(crc, crc);
  br(Assembler::GE, CRC_by128_pre);
  BIND(CRC_less128);
  subs(len, len, 32);
  br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
  adds(len, len, 32 - 4);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by32_loop);
  ldp(tmp0, tmp1, Address(buf));
  crc32x(crc, crc, tmp0);
  ldp(tmp2, tmp3, Address(buf, 16));
  crc32x(crc, crc, tmp1);
  add(buf, buf, 32);
  crc32x(crc, crc, tmp2);
  subs(len, len, 32);
  crc32x(crc, crc, tmp3);
  br(Assembler::GE, CRC_by32_loop);
  cmn(len, (u1)32);
  br(Assembler::NE, CRC_less32);
  b(L_exit);

  BIND(CRC_by4_loop);
  ldrw(tmp0, Address(post(buf, 4)));
  subs(len, len, 4);
  crc32w(crc, crc, tmp0);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
  ldrb(tmp0, Address(post(buf, 1)));
  subs(len, len, 1);
  crc32b(crc, crc, tmp0);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by128_pre);
  kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
    4*256*sizeof(juint) + 8*sizeof(juint));
  mov(crc, 0);
  crc32x(crc, crc, tmp0);
  crc32x(crc, crc, tmp1);

  cbnz(len, CRC_less128);

  BIND(L_exit);
  mvnw(crc, crc);
}

void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3) {
  Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
  assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);

  mvnw(crc, crc);

  subs(len, len, 128);
  br(Assembler::GE, CRC_by64_pre);
  BIND(CRC_less64);
  adds(len, len, 128-32);
  br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
  adds(len, len, 32-4);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by32_loop);
  ldp(tmp0, tmp1, Address(post(buf, 16)));
  subs(len, len, 32);
  crc32x(crc, crc, tmp0);
  ldr(tmp2, Address(post(buf, 8)));
  crc32x(crc, crc, tmp1);
  ldr(tmp3, Address(post(buf, 8)));
  crc32x(crc, crc, tmp2);
  crc32x(crc, crc, tmp3);
  br(Assembler::GE, CRC_by32_loop);
  cmn(len, (u1)32);
  br(Assembler::NE, CRC_less32);
  b(L_exit);

  BIND(CRC_by4_loop);
  ldrw(tmp0, Address(post(buf, 4)));
  subs(len, len, 4);
  crc32w(crc, crc, tmp0);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
  ldrb(tmp0, Address(post(buf, 1)));
  subs(len, len, 1);
  crc32b(crc, crc, tmp0);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by64_pre);
  sub(buf, buf, 8);
  ldp(tmp0, tmp1, Address(buf, 8));
  crc32x(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 24));
  crc32x(crc, crc, tmp1);
  ldr(tmp3, Address(buf, 32));
  crc32x(crc, crc, tmp2);
  ldr(tmp0, Address(buf, 40));
  crc32x(crc, crc, tmp3);
  ldr(tmp1, Address(buf, 48));
  crc32x(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 56));
  crc32x(crc, crc, tmp1);
  ldr(tmp3, Address(pre(buf, 64)));

  b(CRC_by64_loop);

  align(CodeEntryAlignment);
  BIND(CRC_by64_loop);
  subs(len, len, 64);
  crc32x(crc, crc, tmp2);
  ldr(tmp0, Address(buf, 8));
  crc32x(crc, crc, tmp3);
  ldr(tmp1, Address(buf, 16));
  crc32x(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 24));
  crc32x(crc, crc, tmp1);
  ldr(tmp3, Address(buf, 32));
  crc32x(crc, crc, tmp2);
  ldr(tmp0, Address(buf, 40));
  crc32x(crc, crc, tmp3);
  ldr(tmp1, Address(buf, 48));
  crc32x(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 56));
  crc32x(crc, crc, tmp1);
  ldr(tmp3, Address(pre(buf, 64)));
  br(Assembler::GE, CRC_by64_loop);

  // post-loop
  crc32x(crc, crc, tmp2);
  crc32x(crc, crc, tmp3);

  sub(len, len, 64);
  add(buf, buf, 8);
  cmn(len, (u1)128);
  br(Assembler::NE, CRC_less64);
  BIND(L_exit);
  mvnw(crc, crc);
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3) {
  Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;

  if (UseCryptoPmullForCRC32) {
    kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
    return;
  }

  if (UseCRC32) {
    kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
    return;
  }

  mvnw(crc, crc);

  {
    uint64_t offset;
    adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    add(table0, table0, offset);
  }
  add(table1, table0, 1*256*sizeof(juint));
  add(table2, table0, 2*256*sizeof(juint));
  add(table3, table0, 3*256*sizeof(juint));

  { // Neon code start
    cmp(len, (u1)64);
    br(Assembler::LT, L_by16);
    eor(v16, T16B, v16, v16);

    Label L_fold;

    add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants

    ld1(v0, v1, T2D, post(buf, 32));
    ld1r(v4, T2D, post(tmp, 8));
    ld1r(v5, T2D, post(tmp, 8));
    ld1r(v6, T2D, post(tmp, 8));
    ld1r(v7, T2D, post(tmp, 8));
    mov(v16, S, 0, crc);

    eor(v0, T16B, v0, v16);
    sub(len, len, 64);

    BIND(L_fold);
    pmull(v22, T8H, v0, v5, T8B);
    pmull(v20, T8H, v0, v7, T8B);
    pmull(v23, T8H, v0, v4, T8B);
    pmull(v21, T8H, v0, v6, T8B);

    pmull2(v18, T8H, v0, v5, T16B);
    pmull2(v16, T8H, v0, v7, T16B);
    pmull2(v19, T8H, v0, v4, T16B);
    pmull2(v17, T8H, v0, v6, T16B);

    uzp1(v24, T8H, v20, v22);
    uzp2(v25, T8H, v20, v22);
    eor(v20, T16B, v24, v25);

    uzp1(v26, T8H, v16, v18);
    uzp2(v27, T8H, v16, v18);
    eor(v16, T16B, v26, v27);

    ushll2(v22, T4S, v20, T8H, 8);
    ushll(v20, T4S, v20, T4H, 8);

    ushll2(v18, T4S, v16, T8H, 8);
    ushll(v16, T4S, v16, T4H, 8);

    eor(v22, T16B, v23, v22);
    eor(v18, T16B, v19, v18);
    eor(v20, T16B, v21, v20);
    eor(v16, T16B, v17, v16);

    uzp1(v17, T2D, v16, v20);
    uzp2(v21, T2D, v16, v20);
    eor(v17, T16B, v17, v21);

    ushll2(v20, T2D, v17, T4S, 16);
    ushll(v16, T2D, v17, T2S, 16);

    eor(v20, T16B, v20, v22);
    eor(v16, T16B, v16, v18);

    uzp1(v17, T2D, v20, v16);
    uzp2(v21, T2D, v20, v16);
    eor(v28, T16B, v17, v21);

    pmull(v22, T8H, v1, v5, T8B);
    pmull(v20, T8H, v1, v7, T8B);
    pmull(v23, T8H, v1, v4, T8B);
    pmull(v21, T8H, v1, v6, T8B);

    pmull2(v18, T8H, v1, v5, T16B);
    pmull2(v16, T8H, v1, v7, T16B);
    pmull2(v19, T8H, v1, v4, T16B);
    pmull2(v17, T8H, v1, v6, T16B);

    ld1(v0, v1, T2D, post(buf, 32));

    uzp1(v24, T8H, v20, v22);
    uzp2(v25, T8H, v20, v22);
    eor(v20, T16B, v24, v25);

    uzp1(v26, T8H, v16, v18);
    uzp2(v27, T8H, v16, v18);
    eor(v16, T16B, v26, v27);

    ushll2(v22, T4S, v20, T8H, 8);
    ushll(v20, T4S, v20, T4H, 8);

    ushll2(v18, T4S, v16, T8H, 8);
    ushll(v16, T4S, v16, T4H, 8);

    eor(v22, T16B, v23, v22);
    eor(v18, T16B, v19, v18);
    eor(v20, T16B, v21, v20);
    eor(v16, T16B, v17, v16);

    uzp1(v17, T2D, v16, v20);
    uzp2(v21, T2D, v16, v20);
    eor(v16, T16B, v17, v21);

    ushll2(v20, T2D, v16, T4S, 16);
    ushll(v16, T2D, v16, T2S, 16);

    eor(v20, T16B, v22, v20);
    eor(v16, T16B, v16, v18);

    uzp1(v17, T2D, v20, v16);
    uzp2(v21, T2D, v20, v16);
    eor(v20, T16B, v17, v21);

    shl(v16, T2D, v28, 1);
    shl(v17, T2D, v20, 1);

    eor(v0, T16B, v0, v16);
    eor(v1, T16B, v1, v17);

    subs(len, len, 32);
    br(Assembler::GE, L_fold);

    mov(crc, 0);
    mov(tmp, v0, D, 0);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    mov(tmp, v0, D, 1);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    mov(tmp, v1, D, 0);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    mov(tmp, v1, D, 1);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);

    add(len, len, 32);
  } // Neon code end

  BIND(L_by16);
  subs(len, len, 16);
  br(Assembler::GE, L_by16_loop);
  adds(len, len, 16-4);
  br(Assembler::GE, L_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, L_by1_loop);
  b(L_exit);

  BIND(L_by4_loop);
  ldrw(tmp, Address(post(buf, 4)));
  update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
  subs(len, len, 4);
  br(Assembler::GE, L_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(L_by1_loop);
  subs(len, len, 1);
  ldrb(tmp, Address(post(buf, 1)));
  update_byte_crc32(crc, tmp, table0);
  br(Assembler::GT, L_by1_loop);
  b(L_exit);

  align(CodeEntryAlignment);
  BIND(L_by16_loop);
  subs(len, len, 16);
  ldp(tmp, tmp3, Address(post(buf, 16)));
  update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
  update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
  update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
  update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
  br(Assembler::GE, L_by16_loop);
  adds(len, len, 16-4);
  br(Assembler::GE, L_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, L_by1_loop);
  BIND(L_exit);
  mvnw(crc, crc);
}

void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
  Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
  assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

  subs(tmp0, len, 384);
  br(Assembler::GE, CRC_by128_pre);
  BIND(CRC_less128);
  subs(len, len, 32);
  br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
  adds(len, len, 32 - 4);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by32_loop);
  ldp(tmp0, tmp1, Address(buf));
  crc32cx(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 16));
  crc32cx(crc, crc, tmp1);
  ldr(tmp3, Address(buf, 24));
  crc32cx(crc, crc, tmp2);
  add(buf, buf, 32);
  subs(len, len, 32);
  crc32cx(crc, crc, tmp3);
  br(Assembler::GE, CRC_by32_loop);
  cmn(len, (u1)32);
  br(Assembler::NE, CRC_less32);
  b(L_exit);

  BIND(CRC_by4_loop);
  ldrw(tmp0, Address(post(buf, 4)));
  subs(len, len, 4);
  crc32cw(crc, crc, tmp0);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
  ldrb(tmp0, Address(post(buf, 1)));
  subs(len, len, 1);
  crc32cb(crc, crc, tmp0);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by128_pre);
  kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
    4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
  mov(crc, 0);
  crc32cx(crc, crc, tmp0);
  crc32cx(crc, crc, tmp1);

  cbnz(len, CRC_less128);

  BIND(L_exit);
}

void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3) {
  Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
  assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);

  subs(len, len, 128);
  br(Assembler::GE, CRC_by64_pre);
  BIND(CRC_less64);
  adds(len, len, 128-32);
  br(Assembler::GE, CRC_by32_loop);
  BIND(CRC_less32);
  adds(len, len, 32-4);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by32_loop);
  ldp(tmp0, tmp1, Address(post(buf, 16)));
  subs(len, len, 32);
  crc32cx(crc, crc, tmp0);
  ldr(tmp2, Address(post(buf, 8)));
  crc32cx(crc, crc, tmp1);
  ldr(tmp3, Address(post(buf, 8)));
  crc32cx(crc, crc, tmp2);
  crc32cx(crc, crc, tmp3);
  br(Assembler::GE, CRC_by32_loop);
  cmn(len, (u1)32);
  br(Assembler::NE, CRC_less32);
  b(L_exit);

  BIND(CRC_by4_loop);
  ldrw(tmp0, Address(post(buf, 4)));
  subs(len, len, 4);
  crc32cw(crc, crc, tmp0);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
  ldrb(tmp0, Address(post(buf, 1)));
  subs(len, len, 1);
  crc32cb(crc, crc, tmp0);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by64_pre);
  sub(buf, buf, 8);
  ldp(tmp0, tmp1, Address(buf, 8));
  crc32cx(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 24));
  crc32cx(crc, crc, tmp1);
  ldr(tmp3, Address(buf, 32));
  crc32cx(crc, crc, tmp2);
  ldr(tmp0, Address(buf, 40));
  crc32cx(crc, crc, tmp3);
  ldr(tmp1, Address(buf, 48));
  crc32cx(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 56));
  crc32cx(crc, crc, tmp1);
  ldr(tmp3, Address(pre(buf, 64)));

  b(CRC_by64_loop);

  align(CodeEntryAlignment);
  BIND(CRC_by64_loop);
  subs(len, len, 64);
  crc32cx(crc, crc, tmp2);
  ldr(tmp0, Address(buf, 8));
  crc32cx(crc, crc, tmp3);
  ldr(tmp1, Address(buf, 16));
  crc32cx(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 24));
  crc32cx(crc, crc, tmp1);
  ldr(tmp3, Address(buf, 32));
  crc32cx(crc, crc, tmp2);
  ldr(tmp0, Address(buf, 40));
  crc32cx(crc, crc, tmp3);
  ldr(tmp1, Address(buf, 48));
  crc32cx(crc, crc, tmp0);
  ldr(tmp2, Address(buf, 56));
  crc32cx(crc, crc, tmp1);
  ldr(tmp3, Address(pre(buf, 64)));
  br(Assembler::GE, CRC_by64_loop);

  // post-loop
  crc32cx(crc, crc, tmp2);
  crc32cx(crc, crc, tmp3);

  sub(len, len, 64);
  add(buf, buf, 8);
  cmn(len, (u1)128);
  br(Assembler::NE, CRC_less64);
  BIND(L_exit);
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3) {
  if (UseCryptoPmullForCRC32) {
    kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
  } else {
    kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
  }
}
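// An outline of the 128-byte folding performed below (a sketch of the
// standard carry-less-multiply CRC reduction, not the exact constants):
// for each 128-bit lane R and a per-stripe constant pair K_hi:K_lo with
// K_* = x^c mod P for suitable exponents c,
//
//   fold(R) = (R_hi * K_hi) ^ (R_lo * K_lo)   // pmull2 / pmull
//   R'      = fold(R) ^ next_data             // eor3
//
// which preserves the CRC residue while consuming 0x80 bytes per
// iteration. The trailing pmull steps fold the eight lanes down to 512
// and then 128 bits; the callers finish the reduction with crc32x or
// crc32cx on the two remaining 64-bit halves.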
void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
  Label CRC_by128_loop;
  assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);

  sub(len, len, 256);
  Register table = tmp0;
  {
    uint64_t offset;
    adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    add(table, table, offset);
  }
  add(table, table, table_offset);

  // Registers v0..v7 are used as data registers.
  // Registers v16..v31 are used as tmp registers.
  sub(buf, buf, 0x10);
  ldrq(v0, Address(buf, 0x10));
  ldrq(v1, Address(buf, 0x20));
  ldrq(v2, Address(buf, 0x30));
  ldrq(v3, Address(buf, 0x40));
  ldrq(v4, Address(buf, 0x50));
  ldrq(v5, Address(buf, 0x60));
  ldrq(v6, Address(buf, 0x70));
  ldrq(v7, Address(pre(buf, 0x80)));

  movi(v31, T4S, 0);
  mov(v31, S, 0, crc);
  eor(v0, T16B, v0, v31);

  // Register v16 contains constants from the crc table.
  ldrq(v16, Address(table));
  b(CRC_by128_loop);

  align(OptoLoopAlignment);
  BIND(CRC_by128_loop);
  pmull (v17, T1Q, v0, v16, T1D);
  pmull2(v18, T1Q, v0, v16, T2D);
  ldrq(v0, Address(buf, 0x10));
  eor3(v0, T16B, v17, v18, v0);

  pmull (v19, T1Q, v1, v16, T1D);
  pmull2(v20, T1Q, v1, v16, T2D);
  ldrq(v1, Address(buf, 0x20));
  eor3(v1, T16B, v19, v20, v1);

  pmull (v21, T1Q, v2, v16, T1D);
  pmull2(v22, T1Q, v2, v16, T2D);
  ldrq(v2, Address(buf, 0x30));
  eor3(v2, T16B, v21, v22, v2);

  pmull (v23, T1Q, v3, v16, T1D);
  pmull2(v24, T1Q, v3, v16, T2D);
  ldrq(v3, Address(buf, 0x40));
  eor3(v3, T16B, v23, v24, v3);

  pmull (v25, T1Q, v4, v16, T1D);
  pmull2(v26, T1Q, v4, v16, T2D);
  ldrq(v4, Address(buf, 0x50));
  eor3(v4, T16B, v25, v26, v4);

  pmull (v27, T1Q, v5, v16, T1D);
  pmull2(v28, T1Q, v5, v16, T2D);
  ldrq(v5, Address(buf, 0x60));
  eor3(v5, T16B, v27, v28, v5);

  pmull (v29, T1Q, v6, v16, T1D);
  pmull2(v30, T1Q, v6, v16, T2D);
  ldrq(v6, Address(buf, 0x70));
  eor3(v6, T16B, v29, v30, v6);

  // Reuse registers v23, v24.
  // Using them won't block the first instruction of the next iteration.
  pmull (v23, T1Q, v7, v16, T1D);
  pmull2(v24, T1Q, v7, v16, T2D);
  ldrq(v7, Address(pre(buf, 0x80)));
  eor3(v7, T16B, v23, v24, v7);

  subs(len, len, 0x80);
  br(Assembler::GE, CRC_by128_loop);

  // fold into 512 bits
  // Use v31 for constants because v16 can still be in use.
  ldrq(v31, Address(table, 0x10));

  pmull (v17, T1Q, v0, v31, T1D);
  pmull2(v18, T1Q, v0, v31, T2D);
  eor3(v0, T16B, v17, v18, v4);

  pmull (v19, T1Q, v1, v31, T1D);
  pmull2(v20, T1Q, v1, v31, T2D);
  eor3(v1, T16B, v19, v20, v5);

  pmull (v21, T1Q, v2, v31, T1D);
  pmull2(v22, T1Q, v2, v31, T2D);
  eor3(v2, T16B, v21, v22, v6);

  pmull (v23, T1Q, v3, v31, T1D);
  pmull2(v24, T1Q, v3, v31, T2D);
  eor3(v3, T16B, v23, v24, v7);

  // fold into 128 bits
  // Use v17 for constants because v31 can still be in use.
  ldrq(v17, Address(table, 0x20));
  pmull (v25, T1Q, v0, v17, T1D);
  pmull2(v26, T1Q, v0, v17, T2D);
  eor3(v3, T16B, v3, v25, v26);

  // Use v18 for constants because v17 can still be in use.
  ldrq(v18, Address(table, 0x30));
  pmull (v27, T1Q, v1, v18, T1D);
  pmull2(v28, T1Q, v1, v18, T2D);
  eor3(v3, T16B, v3, v27, v28);

  // Use v19 for constants because v18 can still be in use.
  ldrq(v19, Address(table, 0x40));
  pmull (v29, T1Q, v2, v19, T1D);
  pmull2(v30, T1Q, v2, v19, T2D);
  eor3(v0, T16B, v3, v29, v30);

  add(len, len, 0x80);
  add(buf, buf, 0x10);

  mov(tmp0, v0, D, 0);
  mov(tmp1, v0, D, 1);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  uint64_t offset;
  _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
  _masm->ldrb(rscratch1, Address(rscratch1, offset));
  if (value) {
    _masm->cbnzw(rscratch1, _label);
  } else {
    _masm->cbzw(rscratch1, _label);
  }
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
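// Typical (hypothetical) use of SkipIfEqual: the guarded instructions
// are jumped over at runtime while *flag_addr == value; the destructor
// binds the skip target. "SomeBoolFlag" is a made-up example flag:
//
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // ... instructions executed only when SomeBoolFlag is true ...
//   }   // _label bound here by ~SkipIfEqual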
void MacroAssembler::addptr(const Address &dst, int32_t src) {
  Address adr;
  switch(dst.getMode()) {
  case Address::base_plus_offset:
    // This is the expected mode, although we allow all the other
    // forms below.
    adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
    break;
  default:
    lea(rscratch2, dst);
    adr = Address(rscratch2);
    break;
  }
  ldr(rscratch1, adr);
  add(rscratch1, rscratch1, src);
  str(rscratch1, adr);
}

void MacroAssembler::cmpptr(Register src1, Address src2) {
  uint64_t offset;
  adrp(rscratch1, src2, offset);
  ldr(rscratch1, Address(rscratch1, offset));
  cmp(src1, rscratch1);
}

void MacroAssembler::cmpoop(Register obj1, Register obj2) {
  cmp(obj1, obj2);
}

void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}

void MacroAssembler::load_method_holder(Register holder, Register method) {
  ldr(holder, Address(method, Method::const_offset()));             // ConstMethod*
  ldr(holder, Address(holder, ConstMethod::constants_offset()));    // ConstantPool*
  ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
  if (RestoreMXCSROnJNICalls) {
    Label OK;
    get_fpcr(tmp1);
    mov(tmp2, tmp1);
    // Set FPCR to the state we need. We do want Round to Nearest. We
    // don't want non-IEEE rounding modes or floating-point traps.
    bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
    bfi(tmp1, zr, 8, 5);  // Clear exception-control bits (8-12)
    bfi(tmp1, zr, 0, 2);  // Clear AH:FIZ
    eor(tmp2, tmp1, tmp2);
    cbz(tmp2, OK);        // Only reset FPCR if it's wrong
    set_fpcr(tmp1);
    bind(OK);
  }
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
  // OopHandle::resolve is an indirection.
  access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
}

// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
  assert_different_registers(result, tmp1, tmp2);
  Label resolved;

  // A null weak handle resolves to null.
  cbz(result, resolved);

  // Only 64-bit platforms support GCs that require a tmp register.
  // WeakHandle::resolve is an indirection like jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  if (UseCompressedClassPointers) {
    ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    if (CompressedKlassPointers::base() == nullptr) {
      cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release? Concurrent GCs assume
  // the klass length is valid if the klass field is not null.
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}
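// The compression arithmetic implemented below, as a C-style sketch
// (base and shift come from CompressedOops; null maps to zero):
//
//   encode: narrow = (oop == nullptr) ? 0 : (uint32_t)((oop - base) >> shift);
//   decode: oop    = (narrow == 0) ? nullptr : base + ((uint64_t)narrow << shift);
//
// When base == nullptr the add/sub drops out, and when shift == 0 the
// shifts drop out, which yields the specialised paths below.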
// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(s, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /*  Old algorithm: is this any worse?
    Label nonnull;
    cbnz(r, nonnull);
    sub(r, r, rheapbase);
    bind(nonnull);
    lsr(r, r, LogMinObjAlignmentInBytes);
    */
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(r, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
  if (CompressedOops::base() != nullptr) {
    sub(r, r, rheapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(r, r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(src, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");

  Register data = src;
  if (CompressedOops::base() != nullptr) {
    sub(dst, src, rheapbase);
    data = dst;
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(dst, data, LogMinObjAlignmentInBytes);
    data = dst;
  }
  if (data == src)
    mov(dst, src);
}

void MacroAssembler::decode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0 || d != s) {
      lsl(d, s, CompressedOops::shift());
    }
  } else {
    Label done;
    if (d != s)
      mov(d, s);
    cbz(s, done);
    add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
    bind(done);
  }
  verify_oop_msg(d, "broken oop in decode_heap_oop");
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
    if (dst != src) {
      mov(dst, src);
    }
  }
}

MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
  assert(UseCompressedClassPointers, "not using compressed class pointers");
  assert(Metaspace::initialized(), "metaspace not initialized yet");

  if (_klass_decode_mode != KlassDecodeNone) {
    return _klass_decode_mode;
  }

  assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
         || 0 == CompressedKlassPointers::shift(), "decode alg wrong");

  if (CompressedKlassPointers::base() == nullptr) {
    return (_klass_decode_mode = KlassDecodeZero);
  }

  if (operand_valid_for_logical_immediate(
        /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
    const uint64_t range_mask =
      (1ULL << log2i(CompressedKlassPointers::range())) - 1;
    if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
      return (_klass_decode_mode = KlassDecodeXor);
    }
  }

  const uint64_t shifted_base =
    (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
  guarantee((shifted_base & 0xffff0000ffffffff) == 0,
            "compressed class base bad alignment");

  return (_klass_decode_mode = KlassDecodeMovk);
}

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsr(dst, src, LogKlassAlignmentInBytes);
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
      lsr(dst, dst, LogKlassAlignmentInBytes);
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk:
    if (CompressedKlassPointers::shift() != 0) {
      ubfx(dst, src, LogKlassAlignmentInBytes, 32);
    } else {
      movw(dst, src);
    }
    break;

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::encode_klass_not_null(Register r) {
  encode_klass_not_null(r, r);
}
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5172 5173 if (dst != src) movw(dst, src); 5174 movk(dst, shifted_base >> 32, 32); 5175 5176 if (CompressedKlassPointers::shift() != 0) { 5177 lsl(dst, dst, LogKlassAlignmentInBytes); 5178 } 5179 5180 break; 5181 } 5182 5183 case KlassDecodeNone: 5184 ShouldNotReachHere(); 5185 break; 5186 } 5187 } 5188 5189 void MacroAssembler::decode_klass_not_null(Register r) { 5190 decode_klass_not_null(r, r); 5191 } 5192 5193 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5194 #ifdef ASSERT 5195 { 5196 ThreadInVMfromUnknown tiv; 5197 assert (UseCompressedOops, "should only be used for compressed oops"); 5198 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5199 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5200 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5201 } 5202 #endif 5203 int oop_index = oop_recorder()->find_index(obj); 5204 InstructionMark im(this); 5205 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5206 code_section()->relocate(inst_mark(), rspec); 5207 movz(dst, 0xDEAD, 16); 5208 movk(dst, 0xBEEF); 5209 } 5210 5211 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5212 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5213 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5214 int index = oop_recorder()->find_index(k); 5215 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5216 5217 InstructionMark im(this); 5218 RelocationHolder rspec = metadata_Relocation::spec(index); 5219 code_section()->relocate(inst_mark(), rspec); 5220 narrowKlass nk = CompressedKlassPointers::encode(k); 5221 movz(dst, (nk >> 16), 16); 5222 movk(dst, nk & 0xffff); 5223 } 5224 5225 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5226 Register dst, Address src, 5227 Register tmp1, Register tmp2) { 5228 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5229 decorators = AccessInternal::decorator_fixup(decorators, type); 5230 bool as_raw = (decorators & AS_RAW) != 0; 5231 if (as_raw) { 5232 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5233 } else { 5234 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5235 } 5236 } 5237 5238 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5239 Address dst, Register val, 5240 Register tmp1, Register tmp2, Register tmp3) { 5241 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5242 decorators = AccessInternal::decorator_fixup(decorators, type); 5243 bool as_raw = (decorators & AS_RAW) != 0; 5244 if (as_raw) { 5245 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5246 } else { 5247 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5248 } 5249 } 5250 5251 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5252 Register tmp2, DecoratorSet decorators) { 5253 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5254 } 5255 5256 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5257 Register tmp2, DecoratorSet decorators) { 5258 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5259 } 5260 5261 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5262 Register tmp2, Register 
tmp3, DecoratorSet decorators) { 5263 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5264 } 5265 5266 // Used for storing nulls. 5267 void MacroAssembler::store_heap_oop_null(Address dst) { 5268 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5269 } 5270 5271 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5272 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5273 int index = oop_recorder()->allocate_metadata_index(obj); 5274 RelocationHolder rspec = metadata_Relocation::spec(index); 5275 return Address((address)obj, rspec); 5276 } 5277 5278 // Move an oop into a register. 5279 void MacroAssembler::movoop(Register dst, jobject obj) { 5280 int oop_index; 5281 if (obj == nullptr) { 5282 oop_index = oop_recorder()->allocate_oop_index(obj); 5283 } else { 5284 #ifdef ASSERT 5285 { 5286 ThreadInVMfromUnknown tiv; 5287 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5288 } 5289 #endif 5290 oop_index = oop_recorder()->find_index(obj); 5291 } 5292 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5293 5294 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5295 mov(dst, Address((address)obj, rspec)); 5296 } else { 5297 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5298 ldr_constant(dst, Address(dummy, rspec)); 5299 } 5300 5301 } 5302 5303 // Move a metadata address into a register. 5304 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5305 int oop_index; 5306 if (obj == nullptr) { 5307 oop_index = oop_recorder()->allocate_metadata_index(obj); 5308 } else { 5309 oop_index = oop_recorder()->find_index(obj); 5310 } 5311 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5312 mov(dst, Address((address)obj, rspec)); 5313 } 5314 5315 Address MacroAssembler::constant_oop_address(jobject obj) { 5316 #ifdef ASSERT 5317 { 5318 ThreadInVMfromUnknown tiv; 5319 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5320 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5321 } 5322 #endif 5323 int oop_index = oop_recorder()->find_index(obj); 5324 return Address((address)obj, oop_Relocation::spec(oop_index)); 5325 } 5326 5327 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
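// The fast path emitted by the GC's BarrierSetAssembler is, in rough C
// pseudocode (a sketch, not the exact emitted sequence), a bump-the-pointer
// allocation from the thread-local allocation buffer:
//   obj = thread->tlab_top;
//   end = obj + size;                 // var_size_in_bytes or con_size_in_bytes
//   if (end > thread->tlab_end) goto slow_case;
//   thread->tlab_top = end;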
5328 void MacroAssembler::tlab_allocate(Register obj,
5329                                    Register var_size_in_bytes,
5330                                    int con_size_in_bytes,
5331                                    Register t1,
5332                                    Register t2,
5333                                    Label& slow_case) {
5334   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5335   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5336 }
5337
5338 void MacroAssembler::inc_held_monitor_count() {
5339   Address dst = Address(rthread, JavaThread::held_monitor_count_offset());
5340 #ifdef ASSERT
5341   ldr(rscratch2, dst);
5342   increment(rscratch2);
5343   str(rscratch2, dst);
5344   Label ok;
5345   tbz(rscratch2, 63, ok);
5346   STOP("assert(held monitor count underflow)");
5347   should_not_reach_here();
5348   bind(ok);
5349 #else
5350   increment(dst);
5351 #endif
5352 }
5353
5354 void MacroAssembler::dec_held_monitor_count() {
5355   Address dst = Address(rthread, JavaThread::held_monitor_count_offset());
5356 #ifdef ASSERT
5357   ldr(rscratch2, dst);
5358   decrement(rscratch2);
5359   str(rscratch2, dst);
5360   Label ok;
5361   tbz(rscratch2, 63, ok);
5362   STOP("assert(held monitor count underflow)");
5363   should_not_reach_here();
5364   bind(ok);
5365 #else
5366   decrement(dst);
5367 #endif
5368 }
5369
5370 void MacroAssembler::verify_tlab() {
5371 #ifdef ASSERT
5372   if (UseTLAB && VerifyOops) {
5373     Label next, ok;
5374
5375     stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5376
5377     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5378     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5379     cmp(rscratch2, rscratch1);
5380     br(Assembler::HS, next);
5381     STOP("assert(top >= start)");
5382     should_not_reach_here();
5383
5384     bind(next);
5385     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5386     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5387     cmp(rscratch2, rscratch1);
5388     br(Assembler::HS, ok);
5389     STOP("assert(top <= end)");
5390     should_not_reach_here();
5391
5392     bind(ok);
5393     ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5394   }
5395 #endif
5396 }
5397
5398 // Writes to successive stack pages until the given offset is reached, to
5399 // check for stack overflow + shadow pages. This clobbers tmp.
5400 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5401   assert_different_registers(tmp, size, rscratch1);
5402   mov(tmp, sp);
5403   // Bang stack for total size given plus shadow page size.
5404   // Bang one page at a time because a large size can bang beyond the yellow
5405   // and red zones.
5406   Label loop;
5407   mov(rscratch1, (int)os::vm_page_size());
5408   bind(loop);
5409   lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5410   subsw(size, size, rscratch1);
5411   str(size, Address(tmp));
5412   br(Assembler::GT, loop);
5413
5414   // Bang down shadow pages too.
5415   // At this point, (tmp-0) is the last address touched, so don't
5416   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
5417   // was post-decremented.)  Skip this address by starting at i=1, and
5418   // touch a few more pages below.  N.B.  It is important to touch all
5419   // the way down to and including i=StackShadowPages.
5420   for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
5421     // this could be any sized move but this can be a debugging crumb
5422     // so the bigger the better.
5423     lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5424     str(size, Address(tmp));
5425   }
5426 }
5427
5428 // Move the address of the polling page into dest.
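// The page is thread-local: to request a safepoint/handshake the VM arms it
// with an address that faults on access, so the read emitted by
// read_polling_page (below) traps and the thread is stopped at the poll.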
5429 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5430 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5431 } 5432 5433 // Read the polling page. The address of the polling page must 5434 // already be in r. 5435 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5436 address mark; 5437 { 5438 InstructionMark im(this); 5439 code_section()->relocate(inst_mark(), rtype); 5440 ldrw(zr, Address(r, 0)); 5441 mark = inst_mark(); 5442 } 5443 verify_cross_modify_fence_not_required(); 5444 return mark; 5445 } 5446 5447 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5448 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5449 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5450 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5451 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5452 int64_t offset_low = dest_page - low_page; 5453 int64_t offset_high = dest_page - high_page; 5454 5455 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5456 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5457 5458 InstructionMark im(this); 5459 code_section()->relocate(inst_mark(), dest.rspec()); 5460 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5461 // the code cache so that if it is relocated we know it will still reach 5462 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5463 _adrp(reg1, dest.target()); 5464 } else { 5465 uint64_t target = (uint64_t)dest.target(); 5466 uint64_t adrp_target 5467 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5468 5469 _adrp(reg1, (address)adrp_target); 5470 movk(reg1, target >> 32, 32); 5471 } 5472 byte_offset = (uint64_t)dest.target() & 0xfff; 5473 } 5474 5475 void MacroAssembler::load_byte_map_base(Register reg) { 5476 CardTable::CardValue* byte_map_base = 5477 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5478 5479 // Strictly speaking the byte_map_base isn't an address at all, and it might 5480 // even be negative. It is thus materialised as a constant. 
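  // A card-table post-barrier marks the card covering a store target; in
  // rough C pseudocode (a sketch; the real barrier is emitted elsewhere):
  //   byte_map_base[(uintptr_t)addr >> card_shift] = dirty_card;
  // which is why a biased base that points outside the byte map is fine.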
5481   mov(reg, (uint64_t)byte_map_base);
5482 }
5483
5484 void MacroAssembler::build_frame(int framesize) {
5485   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5486   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5487   protect_return_address();
5488   if (framesize < ((1 << 9) + 2 * wordSize)) {
5489     sub(sp, sp, framesize);
5490     stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5491     if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5492   } else {
5493     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5494     if (PreserveFramePointer) mov(rfp, sp);
5495     if (framesize < ((1 << 12) + 2 * wordSize))
5496       sub(sp, sp, framesize - 2 * wordSize);
5497     else {
5498       mov(rscratch1, framesize - 2 * wordSize);
5499       sub(sp, sp, rscratch1);
5500     }
5501   }
5502   verify_cross_modify_fence_not_required();
5503 }
5504
5505 void MacroAssembler::remove_frame(int framesize) {
5506   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5507   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5508   if (framesize < ((1 << 9) + 2 * wordSize)) {
5509     ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5510     add(sp, sp, framesize);
5511   } else {
5512     if (framesize < ((1 << 12) + 2 * wordSize))
5513       add(sp, sp, framesize - 2 * wordSize);
5514     else {
5515       mov(rscratch1, framesize - 2 * wordSize);
5516       add(sp, sp, rscratch1);
5517     }
5518     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5519   }
5520   authenticate_return_address();
5521 }
5522
5523
5524 // This method counts leading positive bytes (highest bit not set) in the provided byte array.
5525 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5526   // The simple and most common case, a small aligned array that is not at
5527   // the end of a memory page, is handled here; all other cases are in the stub.
5528   Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5529   const uint64_t UPPER_BIT_MASK=0x8080808080808080;
5530   assert_different_registers(ary1, len, result);
5531
5532   mov(result, len);
5533   cmpw(len, 0);
5534   br(LE, DONE);
5535   cmpw(len, 4 * wordSize);
5536   br(GE, STUB_LONG); // size > 32 then go to stub
5537
5538   int shift = 64 - exact_log2(os::vm_page_size());
5539   lsl(rscratch1, ary1, shift);
5540   mov(rscratch2, (size_t)(4 * wordSize) << shift);
5541   adds(rscratch2, rscratch1, rscratch2);  // At end of page?
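  // After the shift, the page offset of ary1 occupies the top bits of
  // rscratch1, so the add sets the carry flag exactly when
  // (ary1 % page_size) + 32 reaches the end of the page, i.e. when reading
  // a full 32 bytes might touch the next (possibly unmapped) page.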
5542   br(CS, STUB); // at the end of page then go to stub
5543   subs(len, len, wordSize);
5544   br(LT, END);
5545
5546   BIND(LOOP);
5547   ldr(rscratch1, Address(post(ary1, wordSize)));
5548   tst(rscratch1, UPPER_BIT_MASK);
5549   br(NE, SET_RESULT);
5550   subs(len, len, wordSize);
5551   br(GE, LOOP);
5552   cmpw(len, -wordSize);
5553   br(EQ, DONE);
5554
5555   BIND(END);
5556   ldr(rscratch1, Address(ary1));
5557   sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
5558   lslv(rscratch1, rscratch1, rscratch2);
5559   tst(rscratch1, UPPER_BIT_MASK);
5560   br(NE, SET_RESULT);
5561   b(DONE);
5562
5563   BIND(STUB);
5564   RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
5565   assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
5566   address tpc1 = trampoline_call(count_pos);
5567   if (tpc1 == nullptr) {
5568     DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
5569     postcond(pc() == badAddress);
5570     return nullptr;
5571   }
5572   b(DONE);
5573
5574   BIND(STUB_LONG);
5575   RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
5576   assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
5577   address tpc2 = trampoline_call(count_pos_long);
5578   if (tpc2 == nullptr) {
5579     DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
5580     postcond(pc() == badAddress);
5581     return nullptr;
5582   }
5583   b(DONE);
5584
5585   BIND(SET_RESULT);
5586
5587   add(len, len, wordSize);
5588   sub(result, result, len);
5589
5590   BIND(DONE);
5591   postcond(pc() != badAddress);
5592   return pc();
5593 }
5594
5595 // Clobbers: rscratch1, rscratch2, rflags
5596 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
5597 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
5598                                       Register tmp4, Register tmp5, Register result,
5599                                       Register cnt1, int elem_size) {
5600   Label DONE, SAME;
5601   Register tmp1 = rscratch1;
5602   Register tmp2 = rscratch2;
5603   int elem_per_word = wordSize/elem_size;
5604   int log_elem_size = exact_log2(elem_size);
5605   int klass_offset  = arrayOopDesc::klass_offset_in_bytes();
5606   int length_offset = arrayOopDesc::length_offset_in_bytes();
5607   int base_offset
5608     = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
5609   // When the length offset is not aligned to 8 bytes,
5610   // then we align it down. This is valid because the new
5611   // offset will then be the klass offset, and the klass
5612   // word is the same for arrays of the same type.
5613   int start_offset = align_down(length_offset, BytesPerWord);
5614   int extra_length = base_offset - start_offset;
5615   assert(start_offset == length_offset || start_offset == klass_offset,
5616          "start offset must be 8-byte-aligned or be the klass offset");
5617   assert(base_offset != start_offset, "must include the length field");
5618   extra_length = extra_length / elem_size; // We count in elements, not bytes.
5619   int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
5620
5621   assert(elem_size == 1 || elem_size == 2, "must be char or byte");
5622   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
5623
5624 #ifndef PRODUCT
5625   {
5626     const char kind = (elem_size == 2) ? 'U' : 'L';
5627     char comment[64];
5628     snprintf(comment, sizeof comment, "array_equals%c{", kind);
5629     BLOCK_COMMENT(comment);
5630   }
5631 #endif
5632
5633   // if (a1 == a2)
5634   //     return true;
5635   cmpoop(a1, a2); // May have read barriers for a1 and a2.
5636 br(EQ, SAME); 5637 5638 if (UseSimpleArrayEquals) { 5639 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5640 // if (a1 == nullptr || a2 == nullptr) 5641 // return false; 5642 // a1 & a2 == 0 means (some-pointer is null) or 5643 // (very-rare-or-even-probably-impossible-pointer-values) 5644 // so, we can save one branch in most cases 5645 tst(a1, a2); 5646 mov(result, false); 5647 br(EQ, A_MIGHT_BE_NULL); 5648 // if (a1.length != a2.length) 5649 // return false; 5650 bind(A_IS_NOT_NULL); 5651 ldrw(cnt1, Address(a1, length_offset)); 5652 // Increase loop counter by diff between base- and actual start-offset. 5653 addw(cnt1, cnt1, extra_length); 5654 lea(a1, Address(a1, start_offset)); 5655 lea(a2, Address(a2, start_offset)); 5656 // Check for short strings, i.e. smaller than wordSize. 5657 subs(cnt1, cnt1, elem_per_word); 5658 br(Assembler::LT, SHORT); 5659 // Main 8 byte comparison loop. 5660 bind(NEXT_WORD); { 5661 ldr(tmp1, Address(post(a1, wordSize))); 5662 ldr(tmp2, Address(post(a2, wordSize))); 5663 subs(cnt1, cnt1, elem_per_word); 5664 eor(tmp5, tmp1, tmp2); 5665 cbnz(tmp5, DONE); 5666 } br(GT, NEXT_WORD); 5667 // Last longword. In the case where length == 4 we compare the 5668 // same longword twice, but that's still faster than another 5669 // conditional branch. 5670 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5671 // length == 4. 5672 if (log_elem_size > 0) 5673 lsl(cnt1, cnt1, log_elem_size); 5674 ldr(tmp3, Address(a1, cnt1)); 5675 ldr(tmp4, Address(a2, cnt1)); 5676 eor(tmp5, tmp3, tmp4); 5677 cbnz(tmp5, DONE); 5678 b(SAME); 5679 bind(A_MIGHT_BE_NULL); 5680 // in case both a1 and a2 are not-null, proceed with loads 5681 cbz(a1, DONE); 5682 cbz(a2, DONE); 5683 b(A_IS_NOT_NULL); 5684 bind(SHORT); 5685 5686 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5687 { 5688 ldrw(tmp1, Address(post(a1, 4))); 5689 ldrw(tmp2, Address(post(a2, 4))); 5690 eorw(tmp5, tmp1, tmp2); 5691 cbnzw(tmp5, DONE); 5692 } 5693 bind(TAIL03); 5694 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5695 { 5696 ldrh(tmp3, Address(post(a1, 2))); 5697 ldrh(tmp4, Address(post(a2, 2))); 5698 eorw(tmp5, tmp3, tmp4); 5699 cbnzw(tmp5, DONE); 5700 } 5701 bind(TAIL01); 5702 if (elem_size == 1) { // Only needed when comparing byte arrays. 5703 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5704 { 5705 ldrb(tmp1, a1); 5706 ldrb(tmp2, a2); 5707 eorw(tmp5, tmp1, tmp2); 5708 cbnzw(tmp5, DONE); 5709 } 5710 } 5711 } else { 5712 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5713 CSET_EQ, LAST_CHECK; 5714 mov(result, false); 5715 cbz(a1, DONE); 5716 ldrw(cnt1, Address(a1, length_offset)); 5717 cbz(a2, DONE); 5718 // Increase loop counter by diff between base- and actual start-offset. 
5719   addw(cnt1, cnt1, extra_length);
5720
5721   // on most CPUs the load of a2 above is (surprisingly) still in flight,
5722   // so it is faster to take another branch before comparing a1 and a2
5723   cmp(cnt1, (u1)elem_per_word);
5724   br(LE, SHORT); // short or same
5725   ldr(tmp3, Address(pre(a1, start_offset)));
5726   subs(zr, cnt1, stubBytesThreshold);
5727   br(GE, STUB);
5728   ldr(tmp4, Address(pre(a2, start_offset)));
5729   sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
5730
5731   // Main 16 byte comparison loop with 2 exits
5732   bind(NEXT_DWORD); {
5733     ldr(tmp1, Address(pre(a1, wordSize)));
5734     ldr(tmp2, Address(pre(a2, wordSize)));
5735     subs(cnt1, cnt1, 2 * elem_per_word);
5736     br(LE, TAIL);
5737     eor(tmp4, tmp3, tmp4);
5738     cbnz(tmp4, DONE);
5739     ldr(tmp3, Address(pre(a1, wordSize)));
5740     ldr(tmp4, Address(pre(a2, wordSize)));
5741     cmp(cnt1, (u1)elem_per_word);
5742     br(LE, TAIL2);
5743     cmp(tmp1, tmp2);
5744   } br(EQ, NEXT_DWORD);
5745   b(DONE);
5746
5747   bind(TAIL);
5748   eor(tmp4, tmp3, tmp4);
5749   eor(tmp2, tmp1, tmp2);
5750   lslv(tmp2, tmp2, tmp5);
5751   orr(tmp5, tmp4, tmp2);
5752   cmp(tmp5, zr);
5753   b(CSET_EQ);
5754
5755   bind(TAIL2);
5756   eor(tmp2, tmp1, tmp2);
5757   cbnz(tmp2, DONE);
5758   b(LAST_CHECK);
5759
5760   bind(STUB);
5761   ldr(tmp4, Address(pre(a2, start_offset)));
5762   if (elem_size == 2) { // convert to byte counter
5763     lsl(cnt1, cnt1, 1);
5764   }
5765   eor(tmp5, tmp3, tmp4);
5766   cbnz(tmp5, DONE);
5767   RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
5768   assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
5769   address tpc = trampoline_call(stub);
5770   if (tpc == nullptr) {
5771     DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
5772     postcond(pc() == badAddress);
5773     return nullptr;
5774   }
5775   b(DONE);
5776
5777   // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
5778   // so, if a2 == null => return false(0), else return true, so we can return a2
5779   mov(result, a2);
5780   b(DONE);
5781   bind(SHORT);
5782   sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
5783   ldr(tmp3, Address(a1, start_offset));
5784   ldr(tmp4, Address(a2, start_offset));
5785   bind(LAST_CHECK);
5786   eor(tmp4, tmp3, tmp4);
5787   lslv(tmp5, tmp4, tmp5);
5788   cmp(tmp5, zr);
5789   bind(CSET_EQ);
5790   cset(result, EQ);
5791   b(DONE);
5792 }
5793
5794   bind(SAME);
5795   mov(result, true);
5796   // That's it.
5797   bind(DONE);
5798
5799   BLOCK_COMMENT("} array_equals");
5800   postcond(pc() != badAddress);
5801   return pc();
5802 }
5803
5804 // Compare Strings
5805
5806 // For Strings we're passed the address of the first characters in a1
5807 // and a2 and the length in cnt1.
5808 // There are two implementations. For arrays >= 8 bytes, all
5809 // comparisons (including the final one, which may overlap) are
5810 // performed 8 bytes at a time. For strings < 8 bytes, we compare a
5811 // word, then a halfword, and then a byte.
5812
5813 void MacroAssembler::string_equals(Register a1, Register a2,
5814                                    Register result, Register cnt1)
5815 {
5816   Label SAME, DONE, SHORT, NEXT_WORD;
5817   Register tmp1 = rscratch1;
5818   Register tmp2 = rscratch2;
5819   Register cnt2 = tmp2;  // cnt2 only used in array length compare
5820
5821   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
5822
5823 #ifndef PRODUCT
5824   {
5825     char comment[64];
5826     snprintf(comment, sizeof comment, "{string_equalsL");
5827     BLOCK_COMMENT(comment);
5828   }
5829 #endif
5830
5831   mov(result, false);
5832
5833   // Check for short strings, i.e. smaller than wordSize.
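  // Rough C sketch of the path below for byte strings of cnt1 >= 8 bytes
  // (shorter strings take SHORT and use 4/2/1-byte tail compares):
  //   for (i = 0; i + 8 <= cnt1; i += 8)
  //     if (load64(a1 + i) != load64(a2 + i)) return false;
  //   if (load64(a1 + cnt1 - 8) != load64(a2 + cnt1 - 8)) return false; // may overlap
  //   return true;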
5834 subs(cnt1, cnt1, wordSize); 5835 br(Assembler::LT, SHORT); 5836 // Main 8 byte comparison loop. 5837 bind(NEXT_WORD); { 5838 ldr(tmp1, Address(post(a1, wordSize))); 5839 ldr(tmp2, Address(post(a2, wordSize))); 5840 subs(cnt1, cnt1, wordSize); 5841 eor(tmp1, tmp1, tmp2); 5842 cbnz(tmp1, DONE); 5843 } br(GT, NEXT_WORD); 5844 // Last longword. In the case where length == 4 we compare the 5845 // same longword twice, but that's still faster than another 5846 // conditional branch. 5847 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5848 // length == 4. 5849 ldr(tmp1, Address(a1, cnt1)); 5850 ldr(tmp2, Address(a2, cnt1)); 5851 eor(tmp2, tmp1, tmp2); 5852 cbnz(tmp2, DONE); 5853 b(SAME); 5854 5855 bind(SHORT); 5856 Label TAIL03, TAIL01; 5857 5858 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5859 { 5860 ldrw(tmp1, Address(post(a1, 4))); 5861 ldrw(tmp2, Address(post(a2, 4))); 5862 eorw(tmp1, tmp1, tmp2); 5863 cbnzw(tmp1, DONE); 5864 } 5865 bind(TAIL03); 5866 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5867 { 5868 ldrh(tmp1, Address(post(a1, 2))); 5869 ldrh(tmp2, Address(post(a2, 2))); 5870 eorw(tmp1, tmp1, tmp2); 5871 cbnzw(tmp1, DONE); 5872 } 5873 bind(TAIL01); 5874 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5875 { 5876 ldrb(tmp1, a1); 5877 ldrb(tmp2, a2); 5878 eorw(tmp1, tmp1, tmp2); 5879 cbnzw(tmp1, DONE); 5880 } 5881 // Arrays are equal. 5882 bind(SAME); 5883 mov(result, true); 5884 5885 // That's it. 5886 bind(DONE); 5887 BLOCK_COMMENT("} string_equals"); 5888 } 5889 5890 5891 // The size of the blocks erased by the zero_blocks stub. We must 5892 // handle anything smaller than this ourselves in zero_words(). 5893 const int MacroAssembler::zero_words_block_size = 8; 5894 5895 // zero_words() is used by C2 ClearArray patterns and by 5896 // C1_MacroAssembler. It is as small as possible, handling small word 5897 // counts locally and delegating anything larger to the zero_blocks 5898 // stub. It is expanded many times in compiled code, so it is 5899 // important to keep it short. 5900 5901 // ptr: Address of a buffer to be zeroed. 5902 // cnt: Count in HeapWords. 5903 // 5904 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5905 address MacroAssembler::zero_words(Register ptr, Register cnt) 5906 { 5907 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5908 5909 BLOCK_COMMENT("zero_words {"); 5910 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5911 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5912 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5913 5914 subs(rscratch1, cnt, zero_words_block_size); 5915 Label around; 5916 br(LO, around); 5917 { 5918 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5919 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5920 // Make sure this is a C2 compilation. C1 allocates space only for 5921 // trampoline stubs generated by Call LIR ops, and in any case it 5922 // makes sense for a C1 compilation task to proceed as quickly as 5923 // possible. 
5924 CompileTask* task; 5925 if (StubRoutines::aarch64::complete() 5926 && Thread::current()->is_Compiler_thread() 5927 && (task = ciEnv::current()->task()) 5928 && is_c2_compile(task->comp_level())) { 5929 address tpc = trampoline_call(zero_blocks); 5930 if (tpc == nullptr) { 5931 DEBUG_ONLY(reset_labels(around)); 5932 return nullptr; 5933 } 5934 } else { 5935 far_call(zero_blocks); 5936 } 5937 } 5938 bind(around); 5939 5940 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5941 // for us. 5942 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5943 Label l; 5944 tbz(cnt, exact_log2(i), l); 5945 for (int j = 0; j < i; j += 2) { 5946 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5947 } 5948 bind(l); 5949 } 5950 { 5951 Label l; 5952 tbz(cnt, 0, l); 5953 str(zr, Address(ptr)); 5954 bind(l); 5955 } 5956 5957 BLOCK_COMMENT("} zero_words"); 5958 return pc(); 5959 } 5960 5961 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5962 // cnt: Immediate count in HeapWords. 5963 // 5964 // r10, r11, rscratch1, and rscratch2 are clobbered. 5965 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5966 { 5967 assert(wordSize <= BlockZeroingLowLimit, 5968 "increase BlockZeroingLowLimit"); 5969 address result = nullptr; 5970 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5971 #ifndef PRODUCT 5972 { 5973 char buf[64]; 5974 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5975 BLOCK_COMMENT(buf); 5976 } 5977 #endif 5978 if (cnt >= 16) { 5979 uint64_t loops = cnt/16; 5980 if (loops > 1) { 5981 mov(rscratch2, loops - 1); 5982 } 5983 { 5984 Label loop; 5985 bind(loop); 5986 for (int i = 0; i < 16; i += 2) { 5987 stp(zr, zr, Address(base, i * BytesPerWord)); 5988 } 5989 add(base, base, 16 * BytesPerWord); 5990 if (loops > 1) { 5991 subs(rscratch2, rscratch2, 1); 5992 br(GE, loop); 5993 } 5994 } 5995 } 5996 cnt %= 16; 5997 int i = cnt & 1; // store any odd word to start 5998 if (i) str(zr, Address(base)); 5999 for (; i < (int)cnt; i += 2) { 6000 stp(zr, zr, Address(base, i * wordSize)); 6001 } 6002 BLOCK_COMMENT("} zero_words"); 6003 result = pc(); 6004 } else { 6005 mov(r10, base); mov(r11, cnt); 6006 result = zero_words(r10, r11); 6007 } 6008 return result; 6009 } 6010 6011 // Zero blocks of memory by using DC ZVA. 6012 // 6013 // Aligns the base address first sufficiently for DC ZVA, then uses 6014 // DC ZVA repeatedly for every full block. cnt is the size to be 6015 // zeroed in HeapWords. Returns the count of words left to be zeroed 6016 // in cnt. 6017 // 6018 // NOTE: This is intended to be used in the zero_blocks() stub. If 6019 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 6020 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 6021 Register tmp = rscratch1; 6022 Register tmp2 = rscratch2; 6023 int zva_length = VM_Version::zva_length(); 6024 Label initial_table_end, loop_zva; 6025 Label fini; 6026 6027 // Base must be 16 byte aligned. If not just return and let caller handle it 6028 tst(base, 0x0f); 6029 br(Assembler::NE, fini); 6030 // Align base with ZVA length. 6031 neg(tmp, base); 6032 andr(tmp, tmp, zva_length - 1); 6033 6034 // tmp: the number of bytes to be filled to align the base with ZVA length. 
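  // Branch into the stp ladder below: each stp zeroes 16 bytes and occupies
  // 4 bytes of code, so stepping back (tmp / 16) instructions, i.e.
  // (tmp >> 2) bytes, from initial_table_end runs exactly the stores needed
  // to zero those tmp bytes before the DC ZVA loop takes over.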
6035   add(base, base, tmp);
6036   sub(cnt, cnt, tmp, Assembler::ASR, 3);
6037   adr(tmp2, initial_table_end);
6038   sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
6039   br(tmp2);
6040
6041   for (int i = -zva_length + 16; i < 0; i += 16)
6042     stp(zr, zr, Address(base, i));
6043   bind(initial_table_end);
6044
6045   sub(cnt, cnt, zva_length >> 3);
6046   bind(loop_zva);
6047   dc(Assembler::ZVA, base);
6048   subs(cnt, cnt, zva_length >> 3);
6049   add(base, base, zva_length);
6050   br(Assembler::GE, loop_zva);
6051   add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
6052   bind(fini);
6053 }
6054
6055 // base:   Address of a buffer to be filled, 8 bytes aligned.
6056 // cnt:    Count in 8-byte unit.
6057 // value:  Value to be filled with.
6058 // base will point to the end of the buffer after filling.
6059 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
6060 {
6061 //  Algorithm:
6062 //
6063 //    if (cnt == 0) {
6064 //      return;
6065 //    }
6066 //    if ((p & 8) != 0) {
6067 //      *p++ = v;
6068 //    }
6069 //
6070 //    scratch1 = cnt & 14;
6071 //    cnt -= scratch1;
6072 //    p += scratch1;
6073 //    switch (scratch1 / 2) {
6074 //      do {
6075 //        cnt -= 16;
6076 //          p[-16] = v;
6077 //          p[-15] = v;
6078 //        case 7:
6079 //          p[-14] = v;
6080 //          p[-13] = v;
6081 //        case 6:
6082 //          p[-12] = v;
6083 //          p[-11] = v;
6084 //          // ...
6085 //        case 1:
6086 //          p[-2] = v;
6087 //          p[-1] = v;
6088 //        case 0:
6089 //          p += 16;
6090 //      } while (cnt);
6091 //    }
6092 //    if ((cnt & 1) == 1) {
6093 //      *p++ = v;
6094 //    }
6095
6096   assert_different_registers(base, cnt, value, rscratch1, rscratch2);
6097
6098   Label fini, skip, entry, loop;
6099   const int unroll = 8; // Number of stp instructions we'll unroll
6100
6101   cbz(cnt, fini);
6102   tbz(base, 3, skip);
6103   str(value, Address(post(base, 8)));
6104   sub(cnt, cnt, 1);
6105   bind(skip);
6106
6107   andr(rscratch1, cnt, (unroll-1) * 2);
6108   sub(cnt, cnt, rscratch1);
6109   add(base, base, rscratch1, Assembler::LSL, 3);
6110   adr(rscratch2, entry);
6111   sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
6112   br(rscratch2);
6113
6114   bind(loop);
6115   add(base, base, unroll * 16);
6116   for (int i = -unroll; i < 0; i++)
6117     stp(value, value, Address(base, i * 16));
6118   bind(entry);
6119   subs(cnt, cnt, unroll * 2);
6120   br(Assembler::GE, loop);
6121
6122   tbz(cnt, 0, fini);
6123   str(value, Address(post(base, 8)));
6124   bind(fini);
6125 }
6126
6127 // Intrinsic for
6128 //
6129 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
6130 //     return the number of characters copied.
6131 // - java/lang/StringUTF16.compress
6132 //     return index of non-latin1 character if copy fails, otherwise 'len'.
6133 //
6134 // This version always returns the number of characters copied, and does not
6135 // clobber the 'len' register. A successful copy will complete with the post-
6136 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
6137 // post-condition: 0 <= 'res' < 'len'.
6138 //
6139 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
6140 //       degrade performance (on Ampere Altra - Neoverse N1) to an extent
6141 //       beyond the acceptable, even though the footprint would be smaller.
6142 //       Using 'umaxv' in the ASCII-case comes with a small penalty but does
6143 //       avoid additional bloat.
6144 // 6145 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6146 void MacroAssembler::encode_iso_array(Register src, Register dst, 6147 Register len, Register res, bool ascii, 6148 FloatRegister vtmp0, FloatRegister vtmp1, 6149 FloatRegister vtmp2, FloatRegister vtmp3, 6150 FloatRegister vtmp4, FloatRegister vtmp5) 6151 { 6152 Register cnt = res; 6153 Register max = rscratch1; 6154 Register chk = rscratch2; 6155 6156 prfm(Address(src), PLDL1STRM); 6157 movw(cnt, len); 6158 6159 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6160 6161 Label LOOP_32, DONE_32, FAIL_32; 6162 6163 BIND(LOOP_32); 6164 { 6165 cmpw(cnt, 32); 6166 br(LT, DONE_32); 6167 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6168 // Extract lower bytes. 6169 FloatRegister vlo0 = vtmp4; 6170 FloatRegister vlo1 = vtmp5; 6171 uzp1(vlo0, T16B, vtmp0, vtmp1); 6172 uzp1(vlo1, T16B, vtmp2, vtmp3); 6173 // Merge bits... 6174 orr(vtmp0, T16B, vtmp0, vtmp1); 6175 orr(vtmp2, T16B, vtmp2, vtmp3); 6176 // Extract merged upper bytes. 6177 FloatRegister vhix = vtmp0; 6178 uzp2(vhix, T16B, vtmp0, vtmp2); 6179 // ISO-check on hi-parts (all zero). 6180 // ASCII-check on lo-parts (no sign). 6181 FloatRegister vlox = vtmp1; // Merge lower bytes. 6182 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6183 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6184 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6185 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6186 ASCII(orr(chk, chk, max)); 6187 cbnz(chk, FAIL_32); 6188 subw(cnt, cnt, 32); 6189 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6190 b(LOOP_32); 6191 } 6192 BIND(FAIL_32); 6193 sub(src, src, 64); 6194 BIND(DONE_32); 6195 6196 Label LOOP_8, SKIP_8; 6197 6198 BIND(LOOP_8); 6199 { 6200 cmpw(cnt, 8); 6201 br(LT, SKIP_8); 6202 FloatRegister vhi = vtmp0; 6203 FloatRegister vlo = vtmp1; 6204 ld1(vtmp3, T8H, src); 6205 uzp1(vlo, T16B, vtmp3, vtmp3); 6206 uzp2(vhi, T16B, vtmp3, vtmp3); 6207 // ISO-check on hi-parts (all zero). 6208 // ASCII-check on lo-parts (no sign). 6209 ASCII(cm(LT, vtmp2, T16B, vlo)); 6210 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6211 ASCII(umov(max, vtmp2, B, 0)); 6212 ASCII(orr(chk, chk, max)); 6213 cbnz(chk, SKIP_8); 6214 6215 strd(vlo, Address(post(dst, 8))); 6216 subw(cnt, cnt, 8); 6217 add(src, src, 16); 6218 b(LOOP_8); 6219 } 6220 BIND(SKIP_8); 6221 6222 #undef ASCII 6223 6224 Label LOOP, DONE; 6225 6226 cbz(cnt, DONE); 6227 BIND(LOOP); 6228 { 6229 Register chr = rscratch1; 6230 ldrh(chr, Address(post(src, 2))); 6231 tst(chr, ascii ? 0xff80 : 0xff00); 6232 br(NE, DONE); 6233 strb(chr, Address(post(dst, 1))); 6234 subs(cnt, cnt, 1); 6235 br(GT, LOOP); 6236 } 6237 BIND(DONE); 6238 // Return index where we stopped. 6239 subw(res, len, cnt); 6240 } 6241 6242 // Inflate byte[] array to char[]. 6243 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6244 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6245 FloatRegister vtmp1, FloatRegister vtmp2, 6246 FloatRegister vtmp3, Register tmp4) { 6247 Label big, done, after_init, to_stub; 6248 6249 assert_different_registers(src, dst, len, tmp4, rscratch1); 6250 6251 fmovd(vtmp1, 0.0); 6252 lsrw(tmp4, len, 3); 6253 bind(after_init); 6254 cbnzw(tmp4, big); 6255 // Short string: less than 8 bytes. 6256 { 6257 Label loop, tiny; 6258 6259 cmpw(len, 4); 6260 br(LT, tiny); 6261 // Use SIMD to do 4 bytes. 
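    // zip1 interleaves the loaded bytes with the zero vector prepared in
    // vtmp1, so each Latin-1 byte b becomes the little-endian 16-bit char
    // 0x00'b', e.g. bytes 0x61 0x62 0x63 0x64 inflate to chars 0x0061..0x0064.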
6262     ldrs(vtmp2, post(src, 4));
6263     zip1(vtmp3, T8B, vtmp2, vtmp1);
6264     subw(len, len, 4);
6265     strd(vtmp3, post(dst, 8));
6266
6267     cbzw(len, done);
6268
6269     // Do the remaining bytes one at a time.
6270     bind(loop);
6271     ldrb(tmp4, post(src, 1));
6272     strh(tmp4, post(dst, 2));
6273     subw(len, len, 1);
6274
6275     bind(tiny);
6276     cbnz(len, loop);
6277
6278     b(done);
6279   }
6280
6281   if (SoftwarePrefetchHintDistance >= 0) {
6282     bind(to_stub);
6283       RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
6284       assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
6285       address tpc = trampoline_call(stub);
6286       if (tpc == nullptr) {
6287         DEBUG_ONLY(reset_labels(big, done));
6288         postcond(pc() == badAddress);
6289         return nullptr;
6290       }
6291     b(after_init);
6292   }
6293
6294   // Unpack the bytes 8 at a time.
6295   bind(big);
6296   {
6297     Label loop, around, loop_last, loop_start;
6298
6299     if (SoftwarePrefetchHintDistance >= 0) {
6300       const int large_loop_threshold = (64 + 16)/8;
6301       ldrd(vtmp2, post(src, 8));
6302       andw(len, len, 7);
6303       cmp(tmp4, (u1)large_loop_threshold);
6304       br(GE, to_stub);
6305       b(loop_start);
6306
6307       bind(loop);
6308       ldrd(vtmp2, post(src, 8));
6309       bind(loop_start);
6310       subs(tmp4, tmp4, 1);
6311       br(EQ, loop_last);
6312       zip1(vtmp2, T16B, vtmp2, vtmp1);
6313       ldrd(vtmp3, post(src, 8));
6314       st1(vtmp2, T8H, post(dst, 16));
6315       subs(tmp4, tmp4, 1);
6316       zip1(vtmp3, T16B, vtmp3, vtmp1);
6317       st1(vtmp3, T8H, post(dst, 16));
6318       br(NE, loop);
6319       b(around);
6320       bind(loop_last);
6321       zip1(vtmp2, T16B, vtmp2, vtmp1);
6322       st1(vtmp2, T8H, post(dst, 16));
6323       bind(around);
6324       cbz(len, done);
6325     } else {
6326       andw(len, len, 7);
6327       bind(loop);
6328       ldrd(vtmp2, post(src, 8));
6329       sub(tmp4, tmp4, 1);
6330       zip1(vtmp3, T16B, vtmp2, vtmp1);
6331       st1(vtmp3, T8H, post(dst, 16));
6332       cbnz(tmp4, loop);
6333     }
6334   }
6335
6336   // Do the tail of up to 8 bytes.
6337   add(src, src, len);
6338   ldrd(vtmp3, Address(src, -8));
6339   add(dst, dst, len, ext::uxtw, 1);
6340   zip1(vtmp3, T16B, vtmp3, vtmp1);
6341   strq(vtmp3, Address(dst, -16));
6342
6343   bind(done);
6344   postcond(pc() != badAddress);
6345   return pc();
6346 }
6347
6348 // Compress char[] array to byte[].
6349 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
6350 // Return the array length if every element in array can be encoded,
6351 // otherwise, the index of first non-latin1 (> 0xff) character.
6352 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
6353                                          Register res,
6354                                          FloatRegister tmp0, FloatRegister tmp1,
6355                                          FloatRegister tmp2, FloatRegister tmp3,
6356                                          FloatRegister tmp4, FloatRegister tmp5) {
6357   encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
6358 }
6359
6360 // java.lang.Math.round(double a)
6361 // Returns the closest long to the argument, with ties rounding to
6362 // positive infinity. This requires some fiddling for corner
6363 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
6364 void MacroAssembler::java_round_double(Register dst, FloatRegister src,
6365                                        FloatRegister ftmp) {
6366   Label DONE;
6367   BLOCK_COMMENT("java_round_double: { ");
6368   fmovd(rscratch1, src);
6369   // Use RoundToNearestTiesAway unless src small and -ve.
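  // fcvtas rounds ties away from zero, while Math.round must round ties
  // toward positive infinity; the two differ only for small negative
  // inputs, e.g. Math.round(-0.5) == 0 but ties-away gives -1. Any double
  // with |src| >= 2^52 is already an integer, so only the small negative
  // range needs the add-0.5-then-round-down fixup below.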
6370 fcvtasd(dst, src); 6371 // Test if src >= 0 || abs(src) >= 0x1.0p52 6372 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6373 mov(rscratch2, julong_cast(0x1.0p52)); 6374 cmp(rscratch1, rscratch2); 6375 br(HS, DONE); { 6376 // src < 0 && abs(src) < 0x1.0p52 6377 // src may have a fractional part, so add 0.5 6378 fmovd(ftmp, 0.5); 6379 faddd(ftmp, src, ftmp); 6380 // Convert double to jlong, use RoundTowardsNegative 6381 fcvtmsd(dst, ftmp); 6382 } 6383 bind(DONE); 6384 BLOCK_COMMENT("} java_round_double"); 6385 } 6386 6387 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6388 FloatRegister ftmp) { 6389 Label DONE; 6390 BLOCK_COMMENT("java_round_float: { "); 6391 fmovs(rscratch1, src); 6392 // Use RoundToNearestTiesAway unless src small and -ve. 6393 fcvtassw(dst, src); 6394 // Test if src >= 0 || abs(src) >= 0x1.0p23 6395 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6396 mov(rscratch2, jint_cast(0x1.0p23f)); 6397 cmp(rscratch1, rscratch2); 6398 br(HS, DONE); { 6399 // src < 0 && |src| < 0x1.0p23 6400 // src may have a fractional part, so add 0.5 6401 fmovs(ftmp, 0.5f); 6402 fadds(ftmp, src, ftmp); 6403 // Convert float to jint, use RoundTowardsNegative 6404 fcvtmssw(dst, ftmp); 6405 } 6406 bind(DONE); 6407 BLOCK_COMMENT("} java_round_float"); 6408 } 6409 6410 // get_thread() can be called anywhere inside generated code so we 6411 // need to save whatever non-callee save context might get clobbered 6412 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6413 // the call setup code. 6414 // 6415 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6416 // On other systems, the helper is a usual C function. 6417 // 6418 void MacroAssembler::get_thread(Register dst) { 6419 RegSet saved_regs = 6420 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6421 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6422 6423 protect_return_address(); 6424 push(saved_regs, sp); 6425 6426 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6427 blr(lr); 6428 if (dst != c_rarg0) { 6429 mov(dst, c_rarg0); 6430 } 6431 6432 pop(saved_regs, sp); 6433 authenticate_return_address(); 6434 } 6435 6436 void MacroAssembler::cache_wb(Address line) { 6437 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6438 assert(line.index() == noreg, "index should be noreg"); 6439 assert(line.offset() == 0, "offset should be 0"); 6440 // would like to assert this 6441 // assert(line._ext.shift == 0, "shift should be zero"); 6442 if (VM_Version::supports_dcpop()) { 6443 // writeback using clear virtual address to point of persistence 6444 dc(Assembler::CVAP, line.base()); 6445 } else { 6446 // no need to generate anything as Unsafe.writebackMemory should 6447 // never invoke this stub 6448 } 6449 } 6450 6451 void MacroAssembler::cache_wbsync(bool is_pre) { 6452 // we only need a barrier post sync 6453 if (!is_pre) { 6454 membar(Assembler::AnyAny); 6455 } 6456 } 6457 6458 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6459 // Make sure that native code does not change SVE vector length. 
6460 if (!UseSVE) return; 6461 Label verify_ok; 6462 movw(tmp, zr); 6463 sve_inc(tmp, B); 6464 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 6465 br(EQ, verify_ok); 6466 stop("Error: SVE vector length has changed since jvm startup"); 6467 bind(verify_ok); 6468 } 6469 6470 void MacroAssembler::verify_ptrue() { 6471 Label verify_ok; 6472 if (!UseSVE) { 6473 return; 6474 } 6475 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 6476 sve_dec(rscratch1, B); 6477 cbz(rscratch1, verify_ok); 6478 stop("Error: the preserved predicate register (p7) elements are not all true"); 6479 bind(verify_ok); 6480 } 6481 6482 void MacroAssembler::safepoint_isb() { 6483 isb(); 6484 #ifndef PRODUCT 6485 if (VerifyCrossModifyFence) { 6486 // Clear the thread state. 6487 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6488 } 6489 #endif 6490 } 6491 6492 #ifndef PRODUCT 6493 void MacroAssembler::verify_cross_modify_fence_not_required() { 6494 if (VerifyCrossModifyFence) { 6495 // Check if thread needs a cross modify fence. 6496 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6497 Label fence_not_required; 6498 cbz(rscratch1, fence_not_required); 6499 // If it does then fail. 6500 lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)); 6501 mov(c_rarg0, rthread); 6502 blr(rscratch1); 6503 bind(fence_not_required); 6504 } 6505 } 6506 #endif 6507 6508 void MacroAssembler::spin_wait() { 6509 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 6510 switch (VM_Version::spin_wait_desc().inst()) { 6511 case SpinWait::NOP: 6512 nop(); 6513 break; 6514 case SpinWait::ISB: 6515 isb(); 6516 break; 6517 case SpinWait::YIELD: 6518 yield(); 6519 break; 6520 default: 6521 ShouldNotReachHere(); 6522 } 6523 } 6524 } 6525 6526 // Stack frame creation/removal 6527 6528 void MacroAssembler::enter(bool strip_ret_addr) { 6529 if (strip_ret_addr) { 6530 // Addresses can only be signed once. If there are multiple nested frames being created 6531 // in the same function, then the return address needs stripping first. 6532 strip_return_address(); 6533 } 6534 protect_return_address(); 6535 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6536 mov(rfp, sp); 6537 } 6538 6539 void MacroAssembler::leave() { 6540 mov(sp, rfp); 6541 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 6542 authenticate_return_address(); 6543 } 6544 6545 // ROP Protection 6546 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/ 6547 // destroying stack frames or whenever directly loading/storing the LR to memory. 6548 // If ROP protection is not set then these functions are no-ops. 6549 // For more details on PAC see pauth_aarch64.hpp. 6550 6551 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory. 6552 // Uses value zero as the modifier. 6553 // 6554 void MacroAssembler::protect_return_address() { 6555 if (VM_Version::use_rop_protection()) { 6556 check_return_address(); 6557 paciaz(); 6558 } 6559 } 6560 6561 // Sign the return value in the given register. Use before updating the LR in the existing stack 6562 // frame for the current function. 6563 // Uses value zero as the modifier. 6564 // 6565 void MacroAssembler::protect_return_address(Register return_reg) { 6566 if (VM_Version::use_rop_protection()) { 6567 check_return_address(return_reg); 6568 paciza(return_reg); 6569 } 6570 } 6571 6572 // Authenticate the LR. 
// Use before function return, after restoring FP and loading LR from memory.
6573 // Uses value zero as the modifier.
6574 //
6575 void MacroAssembler::authenticate_return_address() {
6576   if (VM_Version::use_rop_protection()) {
6577     autiaz();
6578     check_return_address();
6579   }
6580 }
6581
6582 // Authenticate the return value in the given register. Use before updating the LR in the existing
6583 // stack frame for the current function.
6584 // Uses value zero as the modifier.
6585 //
6586 void MacroAssembler::authenticate_return_address(Register return_reg) {
6587   if (VM_Version::use_rop_protection()) {
6588     autiza(return_reg);
6589     check_return_address(return_reg);
6590   }
6591 }
6592
6593 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
6594 // there is no guaranteed way of authenticating the LR.
6595 //
6596 void MacroAssembler::strip_return_address() {
6597   if (VM_Version::use_rop_protection()) {
6598     xpaclri();
6599   }
6600 }
6601
6602 #ifndef PRODUCT
6603 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
6604 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point
6605 // it is difficult to debug back to the callee function.
6606 // This function simply loads from the address in the given register.
6607 // Use directly after authentication to catch authentication failures.
6608 // Also use before signing to check that the pointer is valid and hasn't already been signed.
6609 //
6610 void MacroAssembler::check_return_address(Register return_reg) {
6611   if (VM_Version::use_rop_protection()) {
6612     ldr(zr, Address(return_reg));
6613   }
6614 }
6615 #endif
6616
6617 // The java_calling_convention describes stack locations as ideal slots on
6618 // a frame with no abi restrictions. Since we must observe abi restrictions
6619 // (like the placement of the register window) the slots must be biased by
6620 // the following value.
6621 static int reg2offset_in(VMReg r) {
6622   // Account for saved rfp and lr
6623   // This should really be in_preserve_stack_slots
6624   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
6625 }
6626
6627 static int reg2offset_out(VMReg r) {
6628   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
6629 }
6630
6631 // On 64-bit we will store integer-like items to the stack as
6632 // 64-bit items (AArch64 ABI), even though Java would only store
6633 // 32 bits for a parameter. On 32-bit it would simply be 32 bits,
6634 // so this routine does 32->32 on 32-bit and 32->64 on 64-bit.
6635 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
6636   if (src.first()->is_stack()) {
6637     if (dst.first()->is_stack()) {
6638       // stack to stack
6639       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6640       str(tmp, Address(sp, reg2offset_out(dst.first())));
6641     } else {
6642       // stack to reg
6643       ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
6644     }
6645   } else if (dst.first()->is_stack()) {
6646     // reg to stack
6647     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
6648   } else {
6649     if (dst.first() != src.first()) {
6650       sxtw(dst.first()->as_Register(), src.first()->as_Register());
6651     }
6652   }
6653 }
6654
6655 // An oop arg.
// Must pass a handle, not the oop itself.
6656 void MacroAssembler::object_move(
6657                         OopMap* map,
6658                         int oop_handle_offset,
6659                         int framesize_in_slots,
6660                         VMRegPair src,
6661                         VMRegPair dst,
6662                         bool is_receiver,
6663                         int* receiver_offset) {
6664
6665   // must pass a handle. First figure out the location we use as a handle
6666
6667   Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
6668
6669   // See if oop is null; if it is we need no handle
6670
6671   if (src.first()->is_stack()) {
6672
6673     // Oop is already on the stack as an argument
6674     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
6675     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
6676     if (is_receiver) {
6677       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
6678     }
6679
6680     ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
6681     lea(rHandle, Address(rfp, reg2offset_in(src.first())));
6682     // conditionally move a null
6683     cmp(rscratch1, zr);
6684     csel(rHandle, zr, rHandle, Assembler::EQ);
6685   } else {
6686
6687     // Oop is in a register; we must store it to the space we reserve
6688     // on the stack for oop handles, and pass a handle if the oop is non-null
6689
6690     const Register rOop = src.first()->as_Register();
6691     int oop_slot;
6692     if (rOop == j_rarg0)
6693       oop_slot = 0;
6694     else if (rOop == j_rarg1)
6695       oop_slot = 1;
6696     else if (rOop == j_rarg2)
6697       oop_slot = 2;
6698     else if (rOop == j_rarg3)
6699       oop_slot = 3;
6700     else if (rOop == j_rarg4)
6701       oop_slot = 4;
6702     else if (rOop == j_rarg5)
6703       oop_slot = 5;
6704     else if (rOop == j_rarg6)
6705       oop_slot = 6;
6706     else {
6707       assert(rOop == j_rarg7, "wrong register");
6708       oop_slot = 7;
6709     }
6710
6711     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
6712     int offset = oop_slot*VMRegImpl::stack_slot_size;
6713
6714     map->set_oop(VMRegImpl::stack2reg(oop_slot));
6715     // Store oop in handle area, may be null
6716     str(rOop, Address(sp, offset));
6717     if (is_receiver) {
6718       *receiver_offset = offset;
6719     }
6720
6721     cmp(rOop, zr);
6722     lea(rHandle, Address(sp, offset));
6723     // conditionally move a null
6724     csel(rHandle, zr, rHandle, Assembler::EQ);
6725   }
6726
6727   // If arg is on the stack then place it, otherwise it is already in the correct reg.
6728   if (dst.first()->is_stack()) {
6729     str(rHandle, Address(sp, reg2offset_out(dst.first())));
6730   }
6731 }
6732
6733 // A float arg may have to do a float reg to int reg conversion
6734 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
6735   if (src.first()->is_stack()) {
6736     if (dst.first()->is_stack()) {
6737       ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
6738       strw(tmp, Address(sp, reg2offset_out(dst.first())));
6739     } else {
6740       ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
6741     }
6742   } else if (src.first() != dst.first()) {
6743     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
6744       fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
6745     else
6746       strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
6747   }
6748 }
6749
6750 // A long move
6751 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
6752   if (src.first()->is_stack()) {
6753     if (dst.first()->is_stack()) {
6754       // stack to stack
6755       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6756       str(tmp, Address(sp, reg2offset_out(dst.first())));
6757     } else {
6758       // stack to reg
6759       ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
6760     }
6761   } else if (dst.first()->is_stack()) {
6762     // reg to stack
6763     // Do we really have to sign extend???
6764     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
6765     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
6766   } else {
6767     if (dst.first() != src.first()) {
6768       mov(dst.first()->as_Register(), src.first()->as_Register());
6769     }
6770   }
6771 }
6772
6773
6774 // A double move
6775 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
6776   if (src.first()->is_stack()) {
6777     if (dst.first()->is_stack()) {
6778       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6779       str(tmp, Address(sp, reg2offset_out(dst.first())));
6780     } else {
6781       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
6782     }
6783   } else if (src.first() != dst.first()) {
6784     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
6785       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
6786     else
6787       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
6788   }
6789 }
6790
6791 // Implements lightweight-locking.
6792 //
6793 // - obj: the object to be locked
6794 // - t1, t2, t3: temporary registers, will be destroyed
6795 // - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
6796 void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
6797   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6798   assert_different_registers(obj, t1, t2, t3, rscratch1);
6799
6800   Label push;
6801   const Register top = t1;
6802   const Register mark = t2;
6803   const Register t = t3;
6804
6805   // Preload the markWord. It is important that this is the first
6806   // instruction emitted as it is part of C1's null check semantics.
6807   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
6808
6809   // Check if the lock-stack is full.
6810   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6811   cmpw(top, (unsigned)LockStack::end_offset());
6812   br(Assembler::GE, slow);
6813
6814   // Check for recursion.
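  // If the most recently pushed lock-stack entry is obj, this thread
  // already owns the lock and we can simply push obj again.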
6815   subw(t, top, oopSize);
6816   ldr(t, Address(rthread, t));
6817   cmp(obj, t);
6818   br(Assembler::EQ, push);
6819
6820   // Check header for monitor (0b10).
6821   tst(mark, markWord::monitor_value);
6822   br(Assembler::NE, slow);
6823
6824   // Try to lock. Transition lock bits 0b01 => 0b00
6825   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
6826   orr(mark, mark, markWord::unlocked_value);
6827   eor(t, mark, markWord::unlocked_value);
6828   cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
6829           /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
6830   br(Assembler::NE, slow);
6831
6832   bind(push);
6833   // After successful lock, push object on lock-stack.
6834   str(obj, Address(rthread, top));
6835   addw(top, top, oopSize);
6836   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6837 }
6838
6839 // Implements lightweight-unlocking.
6840 //
6841 // - obj: the object to be unlocked
6842 // - t1, t2, t3: temporary registers
6843 // - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
6844 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
6845   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6846   // cmpxchg clobbers rscratch1.
6847   assert_different_registers(obj, t1, t2, t3, rscratch1);
6848
6849 #ifdef ASSERT
6850   {
6851     // Check for lock-stack underflow.
6852     Label stack_ok;
6853     ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6854     cmpw(t1, (unsigned)LockStack::start_offset());
6855     br(Assembler::GE, stack_ok);
6856     STOP("Lock-stack underflow");
6857     bind(stack_ok);
6858   }
6859 #endif
6860
6861   Label unlocked, push_and_slow;
6862   const Register top = t1;
6863   const Register mark = t2;
6864   const Register t = t3;
6865
6866   // Check if obj is top of lock-stack.
6867   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6868   subw(top, top, oopSize);
6869   ldr(t, Address(rthread, top));
6870   cmp(obj, t);
6871   br(Assembler::NE, slow);
6872
6873   // Pop lock-stack.
6874   DEBUG_ONLY(str(zr, Address(rthread, top));)
6875   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6876
6877   // Check if recursive.
6878   subw(t, top, oopSize);
6879   ldr(t, Address(rthread, t));
6880   cmp(obj, t);
6881   br(Assembler::EQ, unlocked);
6882
6883   // Not recursive. Check header for monitor (0b10).
6884   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
6885   tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
6886
6887 #ifdef ASSERT
6888   // Check header not unlocked (0b01).
6889   Label not_unlocked;
6890   tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
6891   stop("lightweight_unlock already unlocked");
6892   bind(not_unlocked);
6893 #endif
6894
6895   // Try to unlock. Transition lock bits 0b00 => 0b01
6896   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
6897   orr(t, mark, markWord::unlocked_value);
6898   cmpxchg(obj, mark, t, Assembler::xword,
6899           /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
6900   br(Assembler::EQ, unlocked);
6901
6902   bind(push_and_slow);
6903   // Restore lock-stack and handle the unlock in runtime.
6904   DEBUG_ONLY(str(obj, Address(rthread, top));)
6905   addw(top, top, oopSize);
6906   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6907   b(slow);
6908
6909   bind(unlocked);
6910 }