/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/SCCache.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
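// Usage sketch for the macros above (added, illustrative only):
// BLOCK_COMMENT annotates non-PRODUCT disassembly, and BIND both binds a
// label and emits its name as a comment, e.g.
//   Label retry;
//   BIND(retry);                  // binds "retry" and comments "retry:"
//   BLOCK_COMMENT("fast path {");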
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm19
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx  #imm12
//      adr/adrp Rx imm21; add Ry Rx  #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//        strictly should be 64 bit non-FP/SIMD i.e.
//        0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//        strictly should be 64 bit movz #imm16<<0
//        110___10100 (i.e. requires insn[31:21] == 11010010100)
//
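// Worked example (added; not part of the original table): the unconditional
// branch `b .+8` encodes as 0x14000002. Its bits insn[30:25] are 0b001010,
// so the dispatch in RelocActions::run() below selects case 1a, and the
// decoder recovers the target as insn_addr + (sextract(insn, 25, 0) << 2)
// = insn_addr + 8.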
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};
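// Added usage sketch (names from this file; it mirrors what Patcher::verify
// asserts above): patching and then decoding should round-trip,
//
//   MacroAssembler::pd_patch_instruction_size(insn_addr, target);
//   assert(MacroAssembler::target_addr_for_insn(insn_addr) == target, "round trip");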
// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
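  // Added note: for adr/adrp the 21-bit immediate is split across the
  // instruction word, immlo in insn[30:29] and immhi in insn[23:5]; the
  // two decoders below reassemble it as (immhi << 2) | immlo.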
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}
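// Added illustration (a sketch; the exact sequence is defined by movptr(),
// and x8 is a hypothetical register): a wide 48-bit pointer such as
// 0x7fffdeadbeef is materialized as
//   movz x8, #0xbeef
//   movk x8, #0xdead, lsl #16
//   movk x8, #0x7fff, lsl #32
// and patch_oop() above rewrites the three imm16 fields (bits 20:5) in place.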
int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
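// Added note on reset_last_Java_frame() above (an assumption based on
// JavaFrameAnchor's contract): a null last_Java_sp means "no last Java
// frame", so clearing sp first hides the anchor from stack walkers before
// the stale fp and pc fields are wiped.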
// Calls to C land
//
// When entering C land, the rfp & resp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}
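// Added note: a plain bl spans only +/-128MB (imm26 << 2), while the
// adrp/add/blr sequence above reaches anywhere within +/-4GB, which covers
// any code cache permitted by the ReservedCodeCacheSize < 4G assert.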
int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}
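// Added sketch (an interpretation of trampoline_call() above): when the
// callee may be out of bl range, the bl is first emitted branching to its
// own pc; call resolution later patches it to the real target if reachable,
// or to the trampoline stub emitted below, which loads the 64-bit
// destination from its data slot and branches via rscratch1.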
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;
  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off) {
      add(recv_klass, recv_klass, itentry_off);
    }
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
      - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
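// Added usage sketch: the slow path below uses repne_scan() the way x86
// uses repne scasq, scanning r2 words at [r5] for r0 and leaving the flags
// set by the last compare:
//
//   repne_scan(r5, r0, r2, rscratch1);
//   br(Assembler::NE, *L_failure);   // no match found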
1544 1545 assert_different_registers(sub_klass, super_klass, temp_reg); 1546 if (temp2_reg != noreg) 1547 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1548 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1549 1550 Label L_fallthrough; 1551 int label_nulls = 0; 1552 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1553 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1554 assert(label_nulls <= 1, "at most one null in the batch"); 1555 1556 // a couple of useful fields in sub_klass: 1557 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1558 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1559 Address secondary_supers_addr(sub_klass, ss_offset); 1560 Address super_cache_addr( sub_klass, sc_offset); 1561 1562 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1563 1564 // Do a linear scan of the secondary super-klass chain. 1565 // This code is rarely used, so simplicity is a virtue here. 1566 // The repne_scan instruction uses fixed registers, which we must spill. 1567 // Don't worry too much about pre-existing connections with the input regs. 1568 1569 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1570 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1571 1572 RegSet pushed_registers; 1573 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1574 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1575 1576 if (super_klass != r0) { 1577 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1578 } 1579 1580 push(pushed_registers, sp); 1581 1582 // Get super_klass value into r0 (even if it was in r5 or r2). 1583 if (super_klass != r0) { 1584 mov(r0, super_klass); 1585 } 1586 1587 #ifndef PRODUCT 1588 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1589 #endif //PRODUCT 1590 1591 // We will consult the secondary-super array. 1592 ldr(r5, secondary_supers_addr); 1593 // Load the array length. 1594 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1595 // Skip to start of data. 1596 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1597 1598 cmp(sp, zr); // Clear Z flag; SP is never zero 1599 // Scan R2 words at [R5] for an occurrence of R0. 1600 // Set NZ/Z based on last compare. 1601 repne_scan(r5, r0, r2, rscratch1); 1602 1603 // Unspill the temp. registers: 1604 pop(pushed_registers, sp); 1605 1606 br(Assembler::NE, *L_failure); 1607 1608 // Success. Cache the super we found and proceed in triumph. 1609 str(super_klass, super_cache_addr); 1610 1611 if (L_success != &L_fallthrough) { 1612 b(*L_success); 1613 } 1614 1615 #undef IS_A_TEMP 1616 1617 bind(L_fallthrough); 1618 } 1619 1620 // Ensure that the inline code and the stub are using the same registers. 
1621 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1622 do { \ 1623 assert(r_super_klass == r0 && \ 1624 r_array_base == r1 && \ 1625 r_array_length == r2 && \ 1626 (r_array_index == r3 || r_array_index == noreg) && \ 1627 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1628 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1629 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1630 } while(0) 1631 1632 // Return true: we succeeded in generating this code 1633 bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, 1634 Register r_super_klass, 1635 Register temp1, 1636 Register temp2, 1637 Register temp3, 1638 FloatRegister vtemp, 1639 Register result, 1640 u1 super_klass_slot, 1641 bool stub_is_near) { 1642 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1643 1644 Label L_fallthrough; 1645 1646 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1647 1648 const Register 1649 r_array_base = temp1, // r1 1650 r_array_length = temp2, // r2 1651 r_array_index = temp3, // r3 1652 r_bitmap = rscratch2; 1653 1654 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1655 1656 u1 bit = super_klass_slot; 1657 1658 // Make sure that result is nonzero if the TBZ below misses. 1659 mov(result, 1); 1660 1661 // We're going to need the bitmap in a vector reg and in a core reg, 1662 // so load both now. 1663 ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset())); 1664 if (bit != 0) { 1665 ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset())); 1666 } 1667 // First check the bitmap to see if super_klass might be present. If 1668 // the bit is zero, we are certain that super_klass is not one of 1669 // the secondary supers. 1670 tbz(r_bitmap, bit, L_fallthrough); 1671 1672 // Get the first array index that can contain super_klass into r_array_index. 1673 if (bit != 0) { 1674 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1675 cnt(vtemp, T8B, vtemp); 1676 addv(vtemp, T8B, vtemp); 1677 fmovd(r_array_index, vtemp); 1678 } else { 1679 mov(r_array_index, (u1)1); 1680 } 1681 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1682 1683 // We will consult the secondary-super array. 1684 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1685 1686 // The value i in r_array_index is >= 1, so even though r_array_base 1687 // points to the length, we don't need to adjust it to point to the 1688 // data. 1689 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1690 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1691 1692 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1693 eor(result, result, r_super_klass); 1694 cbz(result, L_fallthrough); // Found a match 1695 1696 // Is there another entry to check? Consult the bitmap. 1697 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1698 1699 // Linear probe. 1700 if (bit != 0) { 1701 ror(r_bitmap, r_bitmap, bit); 1702 } 1703 1704 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1705 // The next slot to be inspected, by the stub we're about to call, 1706 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1707 // have been checked. 
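  // Worked example (illustrative values, not emitted code): if bit == 5
  // and the bitmap has bits 1 and 5 set, the shifted popcount above
  // yields r_array_index == 2, so the probe above read
  // secondary_supers[1] (the off-by-one being absorbed by r_array_base
  // still pointing at the length word). On a miss with bit 6 also set,
  // the stub resumes the probe at secondary_supers[2].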
1708   Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1709   if (stub_is_near) {
1710     bl(stub);
1711   } else {
1712     address call = trampoline_call(stub);
1713     if (call == nullptr) {
1714       return false; // trampoline allocation failed
1715     }
1716   }
1717
1718   BLOCK_COMMENT("} lookup_secondary_supers_table");
1719
1720   bind(L_fallthrough);
1721
1722   if (VerifySecondarySupers) {
1723     verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1724                                   temp1, temp2, result);      // r1, r2, r5
1725   }
1726   return true;
1727 }
1728
1729 // Called by code generated by lookup_secondary_supers_table
1730 // above. This is called when there is a collision in the hashed
1731 // lookup in the secondary supers array.
1732 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1733                                                              Register r_array_base,
1734                                                              Register r_array_index,
1735                                                              Register r_bitmap,
1736                                                              Register temp1,
1737                                                              Register result) {
1738   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1739
1740   const Register
1741     r_array_length = temp1,
1742     r_sub_klass    = noreg; // unused
1743
1744   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1745
1746   Label L_fallthrough, L_huge;
1747
1748   // Load the array length.
1749   ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1750   // And adjust the array base to point to the data.
1751   // NB! Effectively increments current slot index by 1.
1752   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1753   add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1754
1755   // The bitmap is full to bursting.
1756   // Implicit invariant: BITMAP_FULL implies (length > 0)
1757   assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1758   cmn(r_bitmap, (u1)1);
1759   br(EQ, L_huge);
1760
1761   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1762   // current slot (at secondary_supers[r_array_index]) has not yet
1763   // been inspected, and r_array_index may be out of bounds if we
1764   // wrapped around the end of the array.
1765
1766   { // This is conventional linear probing, but instead of terminating
1767     // when a null entry is found in the table, we maintain a bitmap
1768     // in which a 0 indicates missing entries.
1769     // The check above guarantees there are 0s in the bitmap, so the loop
1770     // eventually terminates.
1771     Label L_loop;
1772     bind(L_loop);
1773
1774     // Check for wraparound.
1775     cmp(r_array_index, r_array_length);
1776     csel(r_array_index, zr, r_array_index, GE);
1777
1778     ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1779     eor(result, rscratch1, r_super_klass);
1780     cbz(result, L_fallthrough);
1781
1782     tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1783
1784     ror(r_bitmap, r_bitmap, 1);
1785     add(r_array_index, r_array_index, 1);
1786     b(L_loop);
1787   }
1788
1789   { // Degenerate case: more than 64 secondary supers.
1790     // FIXME: We could do something smarter here, maybe a vectorized
1791     // comparison or a binary search, but is that worth any added
1792     // complexity?
1793     bind(L_huge);
1794     cmp(sp, zr); // Clear Z flag; SP is never zero
1795     repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1796     cset(result, NE); // result == 0 iff we got a match.
1797   }
1798
1799   bind(L_fallthrough);
1800 }
1801
1802 // Make sure that the hashed lookup and a linear scan agree.
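// A sketch of the scheme used below: both outcomes are normalized to
// 0 == match, 1 == no-match (the linear repne_scan result via
// cset(rscratch1, NE), the hashed-lookup result via cset(result, NE)),
// so a single register compare decides whether the two agree.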
1803 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 1804 Register r_super_klass, 1805 Register temp1, 1806 Register temp2, 1807 Register result) { 1808 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 1809 1810 const Register 1811 r_array_base = temp1, 1812 r_array_length = temp2, 1813 r_array_index = noreg, // unused 1814 r_bitmap = noreg; // unused 1815 1816 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1817 1818 BLOCK_COMMENT("verify_secondary_supers_table {"); 1819 1820 // We will consult the secondary-super array. 1821 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1822 1823 // Load the array length. 1824 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1825 // And adjust the array base to point to the data. 1826 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1827 1828 cmp(sp, zr); // Clear Z flag; SP is never zero 1829 // Scan R2 words at [R5] for an occurrence of R0. 1830 // Set NZ/Z based on last compare. 1831 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 1832 // rscratch1 == 0 iff we got a match. 1833 cset(rscratch1, NE); 1834 1835 Label passed; 1836 cmp(result, zr); 1837 cset(result, NE); // normalize result to 0/1 for comparison 1838 1839 cmp(rscratch1, result); 1840 br(EQ, passed); 1841 { 1842 mov(r0, r_super_klass); // r0 <- r0 1843 mov(r1, r_sub_klass); // r1 <- r4 1844 mov(r2, /*expected*/rscratch1); // r2 <- r8 1845 mov(r3, result); // r3 <- r5 1846 mov(r4, (address)("mismatch")); // r4 <- const 1847 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 1848 should_not_reach_here(); 1849 } 1850 bind(passed); 1851 1852 BLOCK_COMMENT("} verify_secondary_supers_table"); 1853 } 1854 1855 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1856 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1857 assert_different_registers(klass, rthread, scratch); 1858 1859 Label L_fallthrough, L_tmp; 1860 if (L_fast_path == nullptr) { 1861 L_fast_path = &L_fallthrough; 1862 } else if (L_slow_path == nullptr) { 1863 L_slow_path = &L_fallthrough; 1864 } 1865 // Fast path check: class is fully initialized 1866 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset())); 1867 subs(zr, scratch, InstanceKlass::fully_initialized); 1868 br(Assembler::EQ, *L_fast_path); 1869 1870 // Fast path check: current thread is initializer thread 1871 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1872 cmp(rthread, scratch); 1873 1874 if (L_slow_path == &L_fallthrough) { 1875 br(Assembler::EQ, *L_fast_path); 1876 bind(*L_slow_path); 1877 } else if (L_fast_path == &L_fallthrough) { 1878 br(Assembler::NE, *L_slow_path); 1879 bind(*L_fast_path); 1880 } else { 1881 Unimplemented(); 1882 } 1883 } 1884 1885 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1886 if (!VerifyOops) return; 1887 1888 // Pass register number to verify_oop_subroutine 1889 const char* b = nullptr; 1890 { 1891 ResourceMark rm; 1892 stringStream ss; 1893 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1894 b = code_string(ss.as_string()); 1895 } 1896 BLOCK_COMMENT("verify_oop {"); 1897 1898 strip_return_address(); // This might happen within a stack frame. 
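  // With return-address signing (PAC) in use, lr was stripped above so
  // that the raw return address can be re-signed and safely spilled
  // below; authenticate_return_address() reverses this on exit.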
1899 protect_return_address(); 1900 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1901 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1902 1903 mov(r0, reg); 1904 movptr(rscratch1, (uintptr_t)(address)b); 1905 1906 // call indirectly to solve generation ordering problem 1907 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1908 ldr(rscratch2, Address(rscratch2)); 1909 blr(rscratch2); 1910 1911 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1912 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1913 authenticate_return_address(); 1914 1915 BLOCK_COMMENT("} verify_oop"); 1916 } 1917 1918 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1919 if (!VerifyOops) return; 1920 1921 const char* b = nullptr; 1922 { 1923 ResourceMark rm; 1924 stringStream ss; 1925 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1926 b = code_string(ss.as_string()); 1927 } 1928 BLOCK_COMMENT("verify_oop_addr {"); 1929 1930 strip_return_address(); // This might happen within a stack frame. 1931 protect_return_address(); 1932 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1933 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1934 1935 // addr may contain sp so we will have to adjust it based on the 1936 // pushes that we just did. 1937 if (addr.uses(sp)) { 1938 lea(r0, addr); 1939 ldr(r0, Address(r0, 4 * wordSize)); 1940 } else { 1941 ldr(r0, addr); 1942 } 1943 movptr(rscratch1, (uintptr_t)(address)b); 1944 1945 // call indirectly to solve generation ordering problem 1946 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1947 ldr(rscratch2, Address(rscratch2)); 1948 blr(rscratch2); 1949 1950 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1951 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1952 authenticate_return_address(); 1953 1954 BLOCK_COMMENT("} verify_oop_addr"); 1955 } 1956 1957 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1958 int extra_slot_offset) { 1959 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
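  // Computes the address of interpreter expression stack slot
  // (arg_slot + extra_slot_offset): a direct esp-relative Address when
  // the slot is a constant, otherwise esp + slot * stackElementSize is
  // formed in rscratch1 and the remaining byte offset is returned with it.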
1960 int stackElementSize = Interpreter::stackElementSize; 1961 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1962 #ifdef ASSERT 1963 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1964 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1965 #endif 1966 if (arg_slot.is_constant()) { 1967 return Address(esp, arg_slot.as_constant() * stackElementSize 1968 + offset); 1969 } else { 1970 add(rscratch1, esp, arg_slot.as_register(), 1971 ext::uxtx, exact_log2(stackElementSize)); 1972 return Address(rscratch1, offset); 1973 } 1974 } 1975 1976 void MacroAssembler::call_VM_leaf_base(address entry_point, 1977 int number_of_arguments, 1978 Label *retaddr) { 1979 Label E, L; 1980 1981 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1982 1983 mov(rscratch1, entry_point); 1984 blr(rscratch1); 1985 if (retaddr) 1986 bind(*retaddr); 1987 1988 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1989 } 1990 1991 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1992 call_VM_leaf_base(entry_point, number_of_arguments); 1993 } 1994 1995 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1996 pass_arg0(this, arg_0); 1997 call_VM_leaf_base(entry_point, 1); 1998 } 1999 2000 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2001 assert_different_registers(arg_1, c_rarg0); 2002 pass_arg0(this, arg_0); 2003 pass_arg1(this, arg_1); 2004 call_VM_leaf_base(entry_point, 2); 2005 } 2006 2007 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2008 Register arg_1, Register arg_2) { 2009 assert_different_registers(arg_1, c_rarg0); 2010 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2011 pass_arg0(this, arg_0); 2012 pass_arg1(this, arg_1); 2013 pass_arg2(this, arg_2); 2014 call_VM_leaf_base(entry_point, 3); 2015 } 2016 2017 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2018 pass_arg0(this, arg_0); 2019 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2020 } 2021 2022 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2023 2024 assert_different_registers(arg_0, c_rarg1); 2025 pass_arg1(this, arg_1); 2026 pass_arg0(this, arg_0); 2027 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2028 } 2029 2030 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2031 assert_different_registers(arg_0, c_rarg1, c_rarg2); 2032 assert_different_registers(arg_1, c_rarg2); 2033 pass_arg2(this, arg_2); 2034 pass_arg1(this, arg_1); 2035 pass_arg0(this, arg_0); 2036 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2037 } 2038 2039 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2040 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2041 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2042 assert_different_registers(arg_2, c_rarg3); 2043 pass_arg3(this, arg_3); 2044 pass_arg2(this, arg_2); 2045 pass_arg1(this, arg_1); 2046 pass_arg0(this, arg_0); 2047 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2048 } 2049 2050 void MacroAssembler::null_check(Register reg, int offset) { 2051 if (needs_explicit_null_check(offset)) { 2052 // provoke OS null exception if reg is null by 2053 // accessing M[reg] w/o changing any registers 2054 // NOTE: this is plenty to provoke a segv 2055 ldr(zr, Address(reg)); 2056 } else { 2057 // 
nothing to do, (later) access of M[reg + offset] 2058 // will provoke OS null exception if reg is null 2059 } 2060 } 2061 2062 // MacroAssembler protected routines needed to implement 2063 // public methods 2064 2065 void MacroAssembler::mov(Register r, Address dest) { 2066 code_section()->relocate(pc(), dest.rspec()); 2067 uint64_t imm64 = (uint64_t)dest.target(); 2068 movptr(r, imm64); 2069 } 2070 2071 // Move a constant pointer into r. In AArch64 mode the virtual 2072 // address space is 48 bits in size, so we only need three 2073 // instructions to create a patchable instruction sequence that can 2074 // reach anywhere. 2075 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2076 #ifndef PRODUCT 2077 { 2078 char buffer[64]; 2079 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2080 block_comment(buffer); 2081 } 2082 #endif 2083 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2084 movz(r, imm64 & 0xffff); 2085 imm64 >>= 16; 2086 movk(r, imm64 & 0xffff, 16); 2087 imm64 >>= 16; 2088 movk(r, imm64 & 0xffff, 32); 2089 } 2090 2091 // Macro to mov replicated immediate to vector register. 2092 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2093 // the upper 56/48/32 bits must be zeros for B/H/S type. 2094 // Vd will get the following values for different arrangements in T 2095 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2096 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2097 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2098 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2099 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2100 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2101 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2102 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2103 // Clobbers rscratch1 2104 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2105 assert(T != T1Q, "unsupported"); 2106 if (T == T1D || T == T2D) { 2107 int imm = operand_valid_for_movi_immediate(imm64, T); 2108 if (-1 != imm) { 2109 movi(Vd, T, imm); 2110 } else { 2111 mov(rscratch1, imm64); 2112 dup(Vd, T, rscratch1); 2113 } 2114 return; 2115 } 2116 2117 #ifdef ASSERT 2118 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2119 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2120 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2121 #endif 2122 int shift = operand_valid_for_movi_immediate(imm64, T); 2123 uint32_t imm32 = imm64 & 0xffffffffULL; 2124 if (shift >= 0) { 2125 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2126 } else { 2127 movw(rscratch1, imm32); 2128 dup(Vd, T, rscratch1); 2129 } 2130 } 2131 2132 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2133 { 2134 #ifndef PRODUCT 2135 { 2136 char buffer[64]; 2137 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2138 block_comment(buffer); 2139 } 2140 #endif 2141 if (operand_valid_for_logical_immediate(false, imm64)) { 2142 orr(dst, zr, imm64); 2143 } else { 2144 // we can use a combination of MOVZ or MOVN with 2145 // MOVK to build up the constant 2146 uint64_t imm_h[4]; 2147 int zero_count = 0; 2148 int neg_count = 0; 2149 int i; 2150 for (i = 0; i < 4; i++) { 2151 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2152 if (imm_h[i] == 0) { 2153 zero_count++; 2154 } else if (imm_h[i] == 0xffffL) { 2155 neg_count++; 
2156 } 2157 } 2158 if (zero_count == 4) { 2159 // one MOVZ will do 2160 movz(dst, 0); 2161 } else if (neg_count == 4) { 2162 // one MOVN will do 2163 movn(dst, 0); 2164 } else if (zero_count == 3) { 2165 for (i = 0; i < 4; i++) { 2166 if (imm_h[i] != 0L) { 2167 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2168 break; 2169 } 2170 } 2171 } else if (neg_count == 3) { 2172 // one MOVN will do 2173 for (int i = 0; i < 4; i++) { 2174 if (imm_h[i] != 0xffffL) { 2175 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2176 break; 2177 } 2178 } 2179 } else if (zero_count == 2) { 2180 // one MOVZ and one MOVK will do 2181 for (i = 0; i < 3; i++) { 2182 if (imm_h[i] != 0L) { 2183 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2184 i++; 2185 break; 2186 } 2187 } 2188 for (;i < 4; i++) { 2189 if (imm_h[i] != 0L) { 2190 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2191 } 2192 } 2193 } else if (neg_count == 2) { 2194 // one MOVN and one MOVK will do 2195 for (i = 0; i < 4; i++) { 2196 if (imm_h[i] != 0xffffL) { 2197 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2198 i++; 2199 break; 2200 } 2201 } 2202 for (;i < 4; i++) { 2203 if (imm_h[i] != 0xffffL) { 2204 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2205 } 2206 } 2207 } else if (zero_count == 1) { 2208 // one MOVZ and two MOVKs will do 2209 for (i = 0; i < 4; i++) { 2210 if (imm_h[i] != 0L) { 2211 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2212 i++; 2213 break; 2214 } 2215 } 2216 for (;i < 4; i++) { 2217 if (imm_h[i] != 0x0L) { 2218 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2219 } 2220 } 2221 } else if (neg_count == 1) { 2222 // one MOVN and two MOVKs will do 2223 for (i = 0; i < 4; i++) { 2224 if (imm_h[i] != 0xffffL) { 2225 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2226 i++; 2227 break; 2228 } 2229 } 2230 for (;i < 4; i++) { 2231 if (imm_h[i] != 0xffffL) { 2232 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2233 } 2234 } 2235 } else { 2236 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2237 movz(dst, (uint32_t)imm_h[0], 0); 2238 for (i = 1; i < 4; i++) { 2239 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2240 } 2241 } 2242 } 2243 } 2244 2245 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2246 { 2247 #ifndef PRODUCT 2248 { 2249 char buffer[64]; 2250 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2251 block_comment(buffer); 2252 } 2253 #endif 2254 if (operand_valid_for_logical_immediate(true, imm32)) { 2255 orrw(dst, zr, imm32); 2256 } else { 2257 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2258 // constant 2259 uint32_t imm_h[2]; 2260 imm_h[0] = imm32 & 0xffff; 2261 imm_h[1] = ((imm32 >> 16) & 0xffff); 2262 if (imm_h[0] == 0) { 2263 movzw(dst, imm_h[1], 16); 2264 } else if (imm_h[0] == 0xffff) { 2265 movnw(dst, imm_h[1] ^ 0xffff, 16); 2266 } else if (imm_h[1] == 0) { 2267 movzw(dst, imm_h[0], 0); 2268 } else if (imm_h[1] == 0xffff) { 2269 movnw(dst, imm_h[0] ^ 0xffff, 0); 2270 } else { 2271 // use a MOVZ and MOVK (makes it easier to debug) 2272 movzw(dst, imm_h[0], 0); 2273 movkw(dst, imm_h[1], 16); 2274 } 2275 } 2276 } 2277 2278 // Form an address from base + offset in Rd. Rd may or may 2279 // not actually be used: you must use the Address that is returned. 2280 // It is up to you to ensure that the shift provided matches the size 2281 // of your data. 
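// Worked example (illustrative): with shift == 3 and byte_offset ==
// 0x9008, the scaled offset 0x1201 is too big for an unsigned 12-bit
// immediate, so the code below splits it as 0x1000 + 0x201, emits
// "add Rd, base, #0x8000", and returns Address(Rd, 0x1008); both
// halves then encode directly.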
2282 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2283 if (Address::offset_ok_for_immed(byte_offset, shift)) 2284 // It fits; no need for any heroics 2285 return Address(base, byte_offset); 2286 2287 // Don't do anything clever with negative or misaligned offsets 2288 unsigned mask = (1 << shift) - 1; 2289 if (byte_offset < 0 || byte_offset & mask) { 2290 mov(Rd, byte_offset); 2291 add(Rd, base, Rd); 2292 return Address(Rd); 2293 } 2294 2295 // See if we can do this with two 12-bit offsets 2296 { 2297 uint64_t word_offset = byte_offset >> shift; 2298 uint64_t masked_offset = word_offset & 0xfff000; 2299 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2300 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2301 add(Rd, base, masked_offset << shift); 2302 word_offset -= masked_offset; 2303 return Address(Rd, word_offset << shift); 2304 } 2305 } 2306 2307 // Do it the hard way 2308 mov(Rd, byte_offset); 2309 add(Rd, base, Rd); 2310 return Address(Rd); 2311 } 2312 2313 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2314 bool want_remainder, Register scratch) 2315 { 2316 // Full implementation of Java idiv and irem. The function 2317 // returns the (pc) offset of the div instruction - may be needed 2318 // for implicit exceptions. 2319 // 2320 // constraint : ra/rb =/= scratch 2321 // normal case 2322 // 2323 // input : ra: dividend 2324 // rb: divisor 2325 // 2326 // result: either 2327 // quotient (= ra idiv rb) 2328 // remainder (= ra irem rb) 2329 2330 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2331 2332 int idivl_offset = offset(); 2333 if (! want_remainder) { 2334 sdivw(result, ra, rb); 2335 } else { 2336 sdivw(scratch, ra, rb); 2337 Assembler::msubw(result, scratch, rb, ra); 2338 } 2339 2340 return idivl_offset; 2341 } 2342 2343 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2344 bool want_remainder, Register scratch) 2345 { 2346 // Full implementation of Java ldiv and lrem. The function 2347 // returns the (pc) offset of the div instruction - may be needed 2348 // for implicit exceptions. 2349 // 2350 // constraint : ra/rb =/= scratch 2351 // normal case 2352 // 2353 // input : ra: dividend 2354 // rb: divisor 2355 // 2356 // result: either 2357 // quotient (= ra idiv rb) 2358 // remainder (= ra irem rb) 2359 2360 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2361 2362 int idivq_offset = offset(); 2363 if (! want_remainder) { 2364 sdiv(result, ra, rb); 2365 } else { 2366 sdiv(scratch, ra, rb); 2367 Assembler::msub(result, scratch, rb, ra); 2368 } 2369 2370 return idivq_offset; 2371 } 2372 2373 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2374 address prev = pc() - NativeMembar::instruction_size; 2375 address last = code()->last_insn(); 2376 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2377 NativeMembar *bar = NativeMembar_at(prev); 2378 // We are merging two memory barrier instructions. On AArch64 we 2379 // can do this simply by ORing them together. 
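    // For example, a pending LoadLoad|LoadStore barrier followed
    // immediately by a StoreStore request collapses into one dmb whose
    // kind carries all three bits, instead of two back-to-back dmbs.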
2380 bar->set_kind(bar->get_kind() | order_constraint); 2381 BLOCK_COMMENT("merged membar"); 2382 } else { 2383 code()->set_last_insn(pc()); 2384 dmb(Assembler::barrier(order_constraint)); 2385 } 2386 } 2387 2388 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 2389 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 2390 merge_ldst(rt, adr, size_in_bytes, is_store); 2391 code()->clear_last_insn(); 2392 return true; 2393 } else { 2394 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 2395 const uint64_t mask = size_in_bytes - 1; 2396 if (adr.getMode() == Address::base_plus_offset && 2397 (adr.offset() & mask) == 0) { // only supports base_plus_offset. 2398 code()->set_last_insn(pc()); 2399 } 2400 return false; 2401 } 2402 } 2403 2404 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2405 // We always try to merge two adjacent loads into one ldp. 2406 if (!try_merge_ldst(Rx, adr, 8, false)) { 2407 Assembler::ldr(Rx, adr); 2408 } 2409 } 2410 2411 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2412 // We always try to merge two adjacent loads into one ldp. 2413 if (!try_merge_ldst(Rw, adr, 4, false)) { 2414 Assembler::ldrw(Rw, adr); 2415 } 2416 } 2417 2418 void MacroAssembler::str(Register Rx, const Address &adr) { 2419 // We always try to merge two adjacent stores into one stp. 2420 if (!try_merge_ldst(Rx, adr, 8, true)) { 2421 Assembler::str(Rx, adr); 2422 } 2423 } 2424 2425 void MacroAssembler::strw(Register Rw, const Address &adr) { 2426 // We always try to merge two adjacent stores into one stp. 2427 if (!try_merge_ldst(Rw, adr, 4, true)) { 2428 Assembler::strw(Rw, adr); 2429 } 2430 } 2431 2432 // MacroAssembler routines found actually to be needed 2433 2434 void MacroAssembler::push(Register src) 2435 { 2436 str(src, Address(pre(esp, -1 * wordSize))); 2437 } 2438 2439 void MacroAssembler::pop(Register dst) 2440 { 2441 ldr(dst, Address(post(esp, 1 * wordSize))); 2442 } 2443 2444 // Note: load_unsigned_short used to be called load_unsigned_word. 2445 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2446 int off = offset(); 2447 ldrh(dst, src); 2448 return off; 2449 } 2450 2451 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2452 int off = offset(); 2453 ldrb(dst, src); 2454 return off; 2455 } 2456 2457 int MacroAssembler::load_signed_short(Register dst, Address src) { 2458 int off = offset(); 2459 ldrsh(dst, src); 2460 return off; 2461 } 2462 2463 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2464 int off = offset(); 2465 ldrsb(dst, src); 2466 return off; 2467 } 2468 2469 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2470 int off = offset(); 2471 ldrshw(dst, src); 2472 return off; 2473 } 2474 2475 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2476 int off = offset(); 2477 ldrsbw(dst, src); 2478 return off; 2479 } 2480 2481 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2482 switch (size_in_bytes) { 2483 case 8: ldr(dst, src); break; 2484 case 4: ldrw(dst, src); break; 2485 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2486 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2487 default: ShouldNotReachHere(); 2488 } 2489 } 2490 2491 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2492 switch (size_in_bytes) { 2493 case 8: str(src, dst); break; 2494 case 4: strw(src, dst); break; 2495 case 2: strh(src, dst); break; 2496 case 1: strb(src, dst); break; 2497 default: ShouldNotReachHere(); 2498 } 2499 } 2500 2501 void MacroAssembler::decrementw(Register reg, int value) 2502 { 2503 if (value < 0) { incrementw(reg, -value); return; } 2504 if (value == 0) { return; } 2505 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2506 /* else */ { 2507 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2508 movw(rscratch2, (unsigned)value); 2509 subw(reg, reg, rscratch2); 2510 } 2511 } 2512 2513 void MacroAssembler::decrement(Register reg, int value) 2514 { 2515 if (value < 0) { increment(reg, -value); return; } 2516 if (value == 0) { return; } 2517 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2518 /* else */ { 2519 assert(reg != rscratch2, "invalid dst for register decrement"); 2520 mov(rscratch2, (uint64_t)value); 2521 sub(reg, reg, rscratch2); 2522 } 2523 } 2524 2525 void MacroAssembler::decrementw(Address dst, int value) 2526 { 2527 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2528 if (dst.getMode() == Address::literal) { 2529 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2530 lea(rscratch2, dst); 2531 dst = Address(rscratch2); 2532 } 2533 ldrw(rscratch1, dst); 2534 decrementw(rscratch1, value); 2535 strw(rscratch1, dst); 2536 } 2537 2538 void MacroAssembler::decrement(Address dst, int value) 2539 { 2540 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2541 if (dst.getMode() == Address::literal) { 2542 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2543 lea(rscratch2, dst); 2544 dst = Address(rscratch2); 2545 } 2546 ldr(rscratch1, dst); 2547 decrement(rscratch1, value); 2548 str(rscratch1, dst); 2549 } 2550 2551 void MacroAssembler::incrementw(Register reg, int value) 2552 { 2553 if (value < 0) { decrementw(reg, -value); return; } 2554 if (value == 0) { return; } 2555 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2556 /* else */ { 2557 assert(reg != rscratch2, "invalid dst for register increment"); 2558 movw(rscratch2, (unsigned)value); 2559 addw(reg, reg, rscratch2); 2560 } 2561 } 2562 2563 void MacroAssembler::increment(Register reg, int value) 2564 { 2565 if (value < 0) { decrement(reg, -value); return; } 2566 if (value == 0) { return; } 2567 if (value < (1 << 12)) { add(reg, reg, value); return; } 2568 /* else */ { 2569 assert(reg != rscratch2, "invalid dst for register increment"); 2570 movw(rscratch2, (unsigned)value); 2571 add(reg, reg, rscratch2); 2572 } 2573 } 2574 2575 void MacroAssembler::incrementw(Address dst, int value) 2576 { 2577 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2578 if (dst.getMode() == Address::literal) { 2579 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2580 lea(rscratch2, dst); 2581 dst = Address(rscratch2); 2582 } 2583 ldrw(rscratch1, dst); 2584 incrementw(rscratch1, value); 2585 strw(rscratch1, dst); 2586 } 2587 2588 void MacroAssembler::increment(Address dst, int value) 2589 { 2590 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2591 if (dst.getMode() == Address::literal) { 2592 assert(abs(value) < (1 << 12), 
"invalid value and address mode combination"); 2593 lea(rscratch2, dst); 2594 dst = Address(rscratch2); 2595 } 2596 ldr(rscratch1, dst); 2597 increment(rscratch1, value); 2598 str(rscratch1, dst); 2599 } 2600 2601 // Push lots of registers in the bit set supplied. Don't push sp. 2602 // Return the number of words pushed 2603 int MacroAssembler::push(unsigned int bitset, Register stack) { 2604 int words_pushed = 0; 2605 2606 // Scan bitset to accumulate register pairs 2607 unsigned char regs[32]; 2608 int count = 0; 2609 for (int reg = 0; reg <= 30; reg++) { 2610 if (1 & bitset) 2611 regs[count++] = reg; 2612 bitset >>= 1; 2613 } 2614 regs[count++] = zr->raw_encoding(); 2615 count &= ~1; // Only push an even number of regs 2616 2617 if (count) { 2618 stp(as_Register(regs[0]), as_Register(regs[1]), 2619 Address(pre(stack, -count * wordSize))); 2620 words_pushed += 2; 2621 } 2622 for (int i = 2; i < count; i += 2) { 2623 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2624 Address(stack, i * wordSize)); 2625 words_pushed += 2; 2626 } 2627 2628 assert(words_pushed == count, "oops, pushed != count"); 2629 2630 return count; 2631 } 2632 2633 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2634 int words_pushed = 0; 2635 2636 // Scan bitset to accumulate register pairs 2637 unsigned char regs[32]; 2638 int count = 0; 2639 for (int reg = 0; reg <= 30; reg++) { 2640 if (1 & bitset) 2641 regs[count++] = reg; 2642 bitset >>= 1; 2643 } 2644 regs[count++] = zr->raw_encoding(); 2645 count &= ~1; 2646 2647 for (int i = 2; i < count; i += 2) { 2648 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2649 Address(stack, i * wordSize)); 2650 words_pushed += 2; 2651 } 2652 if (count) { 2653 ldp(as_Register(regs[0]), as_Register(regs[1]), 2654 Address(post(stack, count * wordSize))); 2655 words_pushed += 2; 2656 } 2657 2658 assert(words_pushed == count, "oops, pushed != count"); 2659 2660 return count; 2661 } 2662 2663 // Push lots of registers in the bit set supplied. Don't push sp. 
2664 // Return the number of dwords pushed 2665 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2666 int words_pushed = 0; 2667 bool use_sve = false; 2668 int sve_vector_size_in_bytes = 0; 2669 2670 #ifdef COMPILER2 2671 use_sve = Matcher::supports_scalable_vector(); 2672 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2673 #endif 2674 2675 // Scan bitset to accumulate register pairs 2676 unsigned char regs[32]; 2677 int count = 0; 2678 for (int reg = 0; reg <= 31; reg++) { 2679 if (1 & bitset) 2680 regs[count++] = reg; 2681 bitset >>= 1; 2682 } 2683 2684 if (count == 0) { 2685 return 0; 2686 } 2687 2688 if (mode == PushPopFull) { 2689 if (use_sve && sve_vector_size_in_bytes > 16) { 2690 mode = PushPopSVE; 2691 } else { 2692 mode = PushPopNeon; 2693 } 2694 } 2695 2696 #ifndef PRODUCT 2697 { 2698 char buffer[48]; 2699 if (mode == PushPopSVE) { 2700 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2701 } else if (mode == PushPopNeon) { 2702 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2703 } else { 2704 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2705 } 2706 block_comment(buffer); 2707 } 2708 #endif 2709 2710 if (mode == PushPopSVE) { 2711 sub(stack, stack, sve_vector_size_in_bytes * count); 2712 for (int i = 0; i < count; i++) { 2713 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2714 } 2715 return count * sve_vector_size_in_bytes / 8; 2716 } 2717 2718 if (mode == PushPopNeon) { 2719 if (count == 1) { 2720 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2721 return 2; 2722 } 2723 2724 bool odd = (count & 1) == 1; 2725 int push_slots = count + (odd ? 1 : 0); 2726 2727 // Always pushing full 128 bit registers. 2728 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2729 words_pushed += 2; 2730 2731 for (int i = 2; i + 1 < count; i += 2) { 2732 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2733 words_pushed += 2; 2734 } 2735 2736 if (odd) { 2737 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2738 words_pushed++; 2739 } 2740 2741 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2742 return count * 2; 2743 } 2744 2745 if (mode == PushPopFp) { 2746 bool odd = (count & 1) == 1; 2747 int push_slots = count + (odd ? 
1 : 0); 2748 2749 if (count == 1) { 2750 // Stack pointer must be 16 bytes aligned 2751 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2752 return 1; 2753 } 2754 2755 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2756 words_pushed += 2; 2757 2758 for (int i = 2; i + 1 < count; i += 2) { 2759 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2760 words_pushed += 2; 2761 } 2762 2763 if (odd) { 2764 // Stack pointer must be 16 bytes aligned 2765 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2766 words_pushed++; 2767 } 2768 2769 assert(words_pushed == count, "oops, pushed != count"); 2770 2771 return count; 2772 } 2773 2774 return 0; 2775 } 2776 2777 // Return the number of dwords popped 2778 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2779 int words_pushed = 0; 2780 bool use_sve = false; 2781 int sve_vector_size_in_bytes = 0; 2782 2783 #ifdef COMPILER2 2784 use_sve = Matcher::supports_scalable_vector(); 2785 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2786 #endif 2787 // Scan bitset to accumulate register pairs 2788 unsigned char regs[32]; 2789 int count = 0; 2790 for (int reg = 0; reg <= 31; reg++) { 2791 if (1 & bitset) 2792 regs[count++] = reg; 2793 bitset >>= 1; 2794 } 2795 2796 if (count == 0) { 2797 return 0; 2798 } 2799 2800 if (mode == PushPopFull) { 2801 if (use_sve && sve_vector_size_in_bytes > 16) { 2802 mode = PushPopSVE; 2803 } else { 2804 mode = PushPopNeon; 2805 } 2806 } 2807 2808 #ifndef PRODUCT 2809 { 2810 char buffer[48]; 2811 if (mode == PushPopSVE) { 2812 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 2813 } else if (mode == PushPopNeon) { 2814 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 2815 } else { 2816 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 2817 } 2818 block_comment(buffer); 2819 } 2820 #endif 2821 2822 if (mode == PushPopSVE) { 2823 for (int i = count - 1; i >= 0; i--) { 2824 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2825 } 2826 add(stack, stack, sve_vector_size_in_bytes * count); 2827 return count * sve_vector_size_in_bytes / 8; 2828 } 2829 2830 if (mode == PushPopNeon) { 2831 if (count == 1) { 2832 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2833 return 2; 2834 } 2835 2836 bool odd = (count & 1) == 1; 2837 int push_slots = count + (odd ? 1 : 0); 2838 2839 if (odd) { 2840 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2841 words_pushed++; 2842 } 2843 2844 for (int i = 2; i + 1 < count; i += 2) { 2845 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2846 words_pushed += 2; 2847 } 2848 2849 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2850 words_pushed += 2; 2851 2852 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2853 2854 return count * 2; 2855 } 2856 2857 if (mode == PushPopFp) { 2858 bool odd = (count & 1) == 1; 2859 int push_slots = count + (odd ? 
1 : 0); 2860 2861 if (count == 1) { 2862 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 2863 return 1; 2864 } 2865 2866 if (odd) { 2867 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2868 words_pushed++; 2869 } 2870 2871 for (int i = 2; i + 1 < count; i += 2) { 2872 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2873 words_pushed += 2; 2874 } 2875 2876 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 2877 words_pushed += 2; 2878 2879 assert(words_pushed == count, "oops, pushed != count"); 2880 2881 return count; 2882 } 2883 2884 return 0; 2885 } 2886 2887 // Return the number of dwords pushed 2888 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2889 bool use_sve = false; 2890 int sve_predicate_size_in_slots = 0; 2891 2892 #ifdef COMPILER2 2893 use_sve = Matcher::supports_scalable_vector(); 2894 if (use_sve) { 2895 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2896 } 2897 #endif 2898 2899 if (!use_sve) { 2900 return 0; 2901 } 2902 2903 unsigned char regs[PRegister::number_of_registers]; 2904 int count = 0; 2905 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2906 if (1 & bitset) 2907 regs[count++] = reg; 2908 bitset >>= 1; 2909 } 2910 2911 if (count == 0) { 2912 return 0; 2913 } 2914 2915 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2916 VMRegImpl::stack_slot_size * count, 16); 2917 sub(stack, stack, total_push_bytes); 2918 for (int i = 0; i < count; i++) { 2919 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2920 } 2921 return total_push_bytes / 8; 2922 } 2923 2924 // Return the number of dwords popped 2925 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2926 bool use_sve = false; 2927 int sve_predicate_size_in_slots = 0; 2928 2929 #ifdef COMPILER2 2930 use_sve = Matcher::supports_scalable_vector(); 2931 if (use_sve) { 2932 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2933 } 2934 #endif 2935 2936 if (!use_sve) { 2937 return 0; 2938 } 2939 2940 unsigned char regs[PRegister::number_of_registers]; 2941 int count = 0; 2942 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2943 if (1 & bitset) 2944 regs[count++] = reg; 2945 bitset >>= 1; 2946 } 2947 2948 if (count == 0) { 2949 return 0; 2950 } 2951 2952 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2953 VMRegImpl::stack_slot_size * count, 16); 2954 for (int i = count - 1; i >= 0; i--) { 2955 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2956 } 2957 add(stack, stack, total_pop_bytes); 2958 return total_pop_bytes / 8; 2959 } 2960 2961 #ifdef ASSERT 2962 void MacroAssembler::verify_heapbase(const char* msg) { 2963 #if 0 2964 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2965 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2966 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2967 // rheapbase is allocated as general register 2968 return; 2969 } 2970 if (CheckCompressedOops) { 2971 Label ok; 2972 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2973 cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2974 br(Assembler::EQ, ok); 2975 stop(msg); 2976 bind(ok); 2977 pop(1 << rscratch1->encoding(), sp); 2978 } 2979 #endif 2980 } 2981 #endif 2982 2983 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register 
tmp2) { 2984 assert_different_registers(value, tmp1, tmp2); 2985 Label done, tagged, weak_tagged; 2986 2987 cbz(value, done); // Use null as-is. 2988 tst(value, JNIHandles::tag_mask); // Test for tag. 2989 br(Assembler::NE, tagged); 2990 2991 // Resolve local handle 2992 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 2993 verify_oop(value); 2994 b(done); 2995 2996 bind(tagged); 2997 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 2998 tbnz(value, 0, weak_tagged); // Test for weak tag. 2999 3000 // Resolve global handle 3001 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3002 verify_oop(value); 3003 b(done); 3004 3005 bind(weak_tagged); 3006 // Resolve jweak. 3007 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3008 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3009 verify_oop(value); 3010 3011 bind(done); 3012 } 3013 3014 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3015 assert_different_registers(value, tmp1, tmp2); 3016 Label done; 3017 3018 cbz(value, done); // Use null as-is. 3019 3020 #ifdef ASSERT 3021 { 3022 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3023 Label valid_global_tag; 3024 tbnz(value, 1, valid_global_tag); // Test for global tag 3025 stop("non global jobject using resolve_global_jobject"); 3026 bind(valid_global_tag); 3027 } 3028 #endif 3029 3030 // Resolve global handle 3031 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3032 verify_oop(value); 3033 3034 bind(done); 3035 } 3036 3037 void MacroAssembler::stop(const char* msg) { 3038 BLOCK_COMMENT(msg); 3039 // load msg into r0 so we can access it from the signal handler 3040 // ExternalAddress enables saving and restoring via the code cache 3041 lea(c_rarg0, ExternalAddress((address) msg)); 3042 dcps1(0xdeae); 3043 SCCache::add_C_string(msg); 3044 } 3045 3046 void MacroAssembler::unimplemented(const char* what) { 3047 const char* buf = nullptr; 3048 { 3049 ResourceMark rm; 3050 stringStream ss; 3051 ss.print("unimplemented: %s", what); 3052 buf = code_string(ss.as_string()); 3053 } 3054 stop(buf); 3055 } 3056 3057 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3058 #ifdef ASSERT 3059 Label OK; 3060 br(cc, OK); 3061 stop(msg); 3062 bind(OK); 3063 #endif 3064 } 3065 3066 // If a constant does not fit in an immediate field, generate some 3067 // number of MOV instructions and then perform the operation. 3068 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3069 add_sub_imm_insn insn1, 3070 add_sub_reg_insn insn2, 3071 bool is32) { 3072 assert(Rd != zr, "Rd = zr and not setting flags?"); 3073 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3074 if (fits) { 3075 (this->*insn1)(Rd, Rn, imm); 3076 } else { 3077 if (uabs(imm) < (1 << 24)) { 3078 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3079 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3080 } else { 3081 assert_different_registers(Rd, Rn); 3082 mov(Rd, imm); 3083 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3084 } 3085 } 3086 } 3087 3088 // Separate vsn which sets the flags. Optimisations are more restricted 3089 // because we must set the flags correctly. 
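// In particular, the two-instruction split used above for immediates
// under 1 << 24 is not available here, because the flags would then
// reflect only the second addition; an out-of-range immediate must be
// materialized in full before the flag-setting operation.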
3090 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3091 add_sub_imm_insn insn1, 3092 add_sub_reg_insn insn2, 3093 bool is32) { 3094 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3095 if (fits) { 3096 (this->*insn1)(Rd, Rn, imm); 3097 } else { 3098 assert_different_registers(Rd, Rn); 3099 assert(Rd != zr, "overflow in immediate operand"); 3100 mov(Rd, imm); 3101 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3102 } 3103 } 3104 3105 3106 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3107 if (increment.is_register()) { 3108 add(Rd, Rn, increment.as_register()); 3109 } else { 3110 add(Rd, Rn, increment.as_constant()); 3111 } 3112 } 3113 3114 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3115 if (increment.is_register()) { 3116 addw(Rd, Rn, increment.as_register()); 3117 } else { 3118 addw(Rd, Rn, increment.as_constant()); 3119 } 3120 } 3121 3122 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3123 if (decrement.is_register()) { 3124 sub(Rd, Rn, decrement.as_register()); 3125 } else { 3126 sub(Rd, Rn, decrement.as_constant()); 3127 } 3128 } 3129 3130 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3131 if (decrement.is_register()) { 3132 subw(Rd, Rn, decrement.as_register()); 3133 } else { 3134 subw(Rd, Rn, decrement.as_constant()); 3135 } 3136 } 3137 3138 void MacroAssembler::reinit_heapbase() 3139 { 3140 if (UseCompressedOops) { 3141 if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) { 3142 mov(rheapbase, CompressedOops::ptrs_base()); 3143 } else { 3144 lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 3145 ldr(rheapbase, Address(rheapbase)); 3146 } 3147 } 3148 } 3149 3150 // this simulates the behaviour of the x86 cmpxchg instruction using a 3151 // load linked/store conditional pair. we use the acquire/release 3152 // versions of these instructions so that we flush pending writes as 3153 // per Java semantics. 3154 3155 // n.b the x86 version assumes the old value to be compared against is 3156 // in rax and updates rax with the value located in memory if the 3157 // cmpxchg fails. we supply a register for the old value explicitly 3158 3159 // the aarch64 load linked/store conditional instructions do not 3160 // accept an offset. so, unlike x86, we must provide a plain register 3161 // to identify the memory word to be compared/exchanged rather than a 3162 // register+offset Address. 3163 3164 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3165 Label &succeed, Label *fail) { 3166 // oldv holds comparison value 3167 // newv holds value to write in exchange 3168 // addr identifies memory word to compare against/update 3169 if (UseLSE) { 3170 mov(tmp, oldv); 3171 casal(Assembler::xword, oldv, newv, addr); 3172 cmp(tmp, oldv); 3173 br(Assembler::EQ, succeed); 3174 membar(AnyAny); 3175 } else { 3176 Label retry_load, nope; 3177 prfm(Address(addr), PSTL1STRM); 3178 bind(retry_load); 3179 // flush and load exclusive from the memory location 3180 // and fail if it is not what we expect 3181 ldaxr(tmp, addr); 3182 cmp(tmp, oldv); 3183 br(Assembler::NE, nope); 3184 // if we store+flush with no intervening write tmp will be zero 3185 stlxr(tmp, newv, addr); 3186 cbzw(tmp, succeed); 3187 // retry so we only ever return after a load fails to compare 3188 // ensures we don't return a stale value after a failed write. 
3189 b(retry_load); 3190 // if the memory word differs we return it in oldv and signal a fail 3191 bind(nope); 3192 membar(AnyAny); 3193 mov(oldv, tmp); 3194 } 3195 if (fail) 3196 b(*fail); 3197 } 3198 3199 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 3200 Label &succeed, Label *fail) { 3201 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 3202 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 3203 } 3204 3205 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 3206 Label &succeed, Label *fail) { 3207 // oldv holds comparison value 3208 // newv holds value to write in exchange 3209 // addr identifies memory word to compare against/update 3210 // tmp returns 0/1 for success/failure 3211 if (UseLSE) { 3212 mov(tmp, oldv); 3213 casal(Assembler::word, oldv, newv, addr); 3214 cmp(tmp, oldv); 3215 br(Assembler::EQ, succeed); 3216 membar(AnyAny); 3217 } else { 3218 Label retry_load, nope; 3219 prfm(Address(addr), PSTL1STRM); 3220 bind(retry_load); 3221 // flush and load exclusive from the memory location 3222 // and fail if it is not what we expect 3223 ldaxrw(tmp, addr); 3224 cmp(tmp, oldv); 3225 br(Assembler::NE, nope); 3226 // if we store+flush with no intervening write tmp will be zero 3227 stlxrw(tmp, newv, addr); 3228 cbzw(tmp, succeed); 3229 // retry so we only ever return after a load fails to compare 3230 // ensures we don't return a stale value after a failed write. 3231 b(retry_load); 3232 // if the memory word differs we return it in oldv and signal a fail 3233 bind(nope); 3234 membar(AnyAny); 3235 mov(oldv, tmp); 3236 } 3237 if (fail) 3238 b(*fail); 3239 } 3240 3241 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3242 // doesn't retry and may fail spuriously. If the oldval is wanted, 3243 // Pass a register for the result, otherwise pass noreg. 3244 3245 // Clobbers rscratch1 3246 void MacroAssembler::cmpxchg(Register addr, Register expected, 3247 Register new_val, 3248 enum operand_size size, 3249 bool acquire, bool release, 3250 bool weak, 3251 Register result) { 3252 if (result == noreg) result = rscratch1; 3253 BLOCK_COMMENT("cmpxchg {"); 3254 if (UseLSE) { 3255 mov(result, expected); 3256 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3257 compare_eq(result, expected, size); 3258 #ifdef ASSERT 3259 // Poison rscratch1 which is written on !UseLSE branch 3260 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3261 #endif 3262 } else { 3263 Label retry_load, done; 3264 prfm(Address(addr), PSTL1STRM); 3265 bind(retry_load); 3266 load_exclusive(result, addr, size, acquire); 3267 compare_eq(result, expected, size); 3268 br(Assembler::NE, done); 3269 store_exclusive(rscratch1, new_val, addr, size, release); 3270 if (weak) { 3271 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3272 } else { 3273 cbnzw(rscratch1, retry_load); 3274 } 3275 bind(done); 3276 } 3277 BLOCK_COMMENT("} cmpxchg"); 3278 } 3279 3280 // A generic comparison. Only compares for equality, clobbers rscratch1. 
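// AArch64 has no halfword/byte register compare, so for sub-word sizes
// equality is computed below by XORing the operands into rscratch1 and
// testing the low 16 or 8 bits with ands.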
3281 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3282 if (size == xword) { 3283 cmp(rm, rn); 3284 } else if (size == word) { 3285 cmpw(rm, rn); 3286 } else if (size == halfword) { 3287 eorw(rscratch1, rm, rn); 3288 ands(zr, rscratch1, 0xffff); 3289 } else if (size == byte) { 3290 eorw(rscratch1, rm, rn); 3291 ands(zr, rscratch1, 0xff); 3292 } else { 3293 ShouldNotReachHere(); 3294 } 3295 } 3296 3297 3298 static bool different(Register a, RegisterOrConstant b, Register c) { 3299 if (b.is_constant()) 3300 return a != c; 3301 else 3302 return a != b.as_register() && a != c && b.as_register() != c; 3303 } 3304 3305 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3306 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3307 if (UseLSE) { \ 3308 prev = prev->is_valid() ? prev : zr; \ 3309 if (incr.is_register()) { \ 3310 AOP(sz, incr.as_register(), prev, addr); \ 3311 } else { \ 3312 mov(rscratch2, incr.as_constant()); \ 3313 AOP(sz, rscratch2, prev, addr); \ 3314 } \ 3315 return; \ 3316 } \ 3317 Register result = rscratch2; \ 3318 if (prev->is_valid()) \ 3319 result = different(prev, incr, addr) ? prev : rscratch2; \ 3320 \ 3321 Label retry_load; \ 3322 prfm(Address(addr), PSTL1STRM); \ 3323 bind(retry_load); \ 3324 LDXR(result, addr); \ 3325 OP(rscratch1, result, incr); \ 3326 STXR(rscratch2, rscratch1, addr); \ 3327 cbnzw(rscratch2, retry_load); \ 3328 if (prev->is_valid() && prev != result) { \ 3329 IOP(prev, rscratch1, incr); \ 3330 } \ 3331 } 3332 3333 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3334 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3335 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3336 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3337 3338 #undef ATOMIC_OP 3339 3340 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3341 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3342 if (UseLSE) { \ 3343 prev = prev->is_valid() ? prev : zr; \ 3344 AOP(sz, newv, prev, addr); \ 3345 return; \ 3346 } \ 3347 Register result = rscratch2; \ 3348 if (prev->is_valid()) \ 3349 result = different(prev, newv, addr) ? 
prev : rscratch2;                                                       \
3350                                                                         \
3351   Label retry_load;                                                     \
3352   prfm(Address(addr), PSTL1STRM);                                       \
3353   bind(retry_load);                                                     \
3354   LDXR(result, addr);                                                   \
3355   STXR(rscratch1, newv, addr);                                          \
3356   cbnzw(rscratch1, retry_load);                                         \
3357   if (prev->is_valid() && prev != result)                               \
3358     mov(prev, result);                                                  \
3359 }
3360
3361 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3362 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3363 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3364 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3365 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3366 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3367
3368 #undef ATOMIC_XCHG
3369
3370 #ifndef PRODUCT
3371 extern "C" void findpc(intptr_t x);
3372 #endif
3373
3374 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3375 {
3376   // In order to get locks to work, we need to fake an in_VM state
3377   if (ShowMessageBoxOnError) {
3378     JavaThread* thread = JavaThread::current();
3379     JavaThreadState saved_state = thread->thread_state();
3380     thread->set_thread_state(_thread_in_vm);
3381 #ifndef PRODUCT
3382     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3383       ttyLocker ttyl;
3384       BytecodeCounter::print();
3385     }
3386 #endif
3387     if (os::message_box(msg, "Execution stopped, print registers?")) {
3388       ttyLocker ttyl;
3389       tty->print_cr(" pc = 0x%016" PRIx64, pc);
3390 #ifndef PRODUCT
3391       tty->cr();
3392       findpc(pc);
3393       tty->cr();
3394 #endif
3395       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3396       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3397       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3398       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3399       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3400       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3401       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3402       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3403       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3404       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3405       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3406       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3407       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3408       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3409       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3410       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3411       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3412       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3413       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3414       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3415       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3416       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3417       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3418       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3419       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3420       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3421       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3422       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3423       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
      // n.b. r29 (fp) is not printed here
3424       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3425       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3426       BREAKPOINT;
3427     }
3428   }
3429   fatal("DEBUG MESSAGE: %s", msg);
3430 }
3431
3432 RegSet MacroAssembler::call_clobbered_gp_registers() {
3433   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3434 #ifndef R18_RESERVED
3435   regs += r18_tls;
3436 #endif
3437   return regs;
3438 }
3439
3440 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3441   int step = 4 *
wordSize; 3442 push(call_clobbered_gp_registers() - exclude, sp); 3443 sub(sp, sp, step); 3444 mov(rscratch1, -step); 3445 // Push v0-v7, v16-v31. 3446 for (int i = 31; i>= 4; i -= 4) { 3447 if (i <= v7->encoding() || i >= v16->encoding()) 3448 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3449 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3450 } 3451 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3452 as_FloatRegister(3), T1D, Address(sp)); 3453 } 3454 3455 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3456 for (int i = 0; i < 32; i += 4) { 3457 if (i <= v7->encoding() || i >= v16->encoding()) 3458 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3459 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3460 } 3461 3462 reinitialize_ptrue(); 3463 3464 pop(call_clobbered_gp_registers() - exclude, sp); 3465 } 3466 3467 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3468 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3469 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3470 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3471 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3472 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3473 sve_str(as_FloatRegister(i), Address(sp, i)); 3474 } 3475 } else { 3476 int step = (save_vectors ? 8 : 4) * wordSize; 3477 mov(rscratch1, -step); 3478 sub(sp, sp, step); 3479 for (int i = 28; i >= 4; i -= 4) { 3480 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3481 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3482 } 3483 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3484 } 3485 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3486 sub(sp, sp, total_predicate_in_bytes); 3487 for (int i = 0; i < PRegister::number_of_registers; i++) { 3488 sve_str(as_PRegister(i), Address(sp, i)); 3489 } 3490 } 3491 } 3492 3493 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3494 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3495 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3496 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3497 sve_ldr(as_PRegister(i), Address(sp, i)); 3498 } 3499 add(sp, sp, total_predicate_in_bytes); 3500 } 3501 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3502 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3503 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3504 } 3505 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3506 } else { 3507 int step = (restore_vectors ? 8 : 4) * wordSize; 3508 for (int i = 0; i <= 28; i += 4) 3509 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3510 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3511 } 3512 3513 // We may use predicate registers and rely on ptrue with SVE, 3514 // regardless of wide vector (> 8 bytes) used or not. 3515 if (use_sve) { 3516 reinitialize_ptrue(); 3517 } 3518 3519 // integer registers except lr & sp 3520 pop(RegSet::range(r0, r17), sp); 3521 #ifdef R18_RESERVED 3522 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3523 pop(RegSet::range(r20, r29), sp); 3524 #else 3525 pop(RegSet::range(r18_tls, r29), sp); 3526 #endif 3527 } 3528 3529 /** 3530 * Helpers for multiply_to_len(). 
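 * add2_with_carry(final_dest_hi, dest_hi, dest_lo, src1, src2) computes
 * final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2, rippling each
 * carry out of dest_lo into the high word (dest_hi is clobbered).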
3531  */
3532 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3533                                      Register src1, Register src2) {
3534   adds(dest_lo, dest_lo, src1);
3535   adc(dest_hi, dest_hi, zr);
3536   adds(dest_lo, dest_lo, src2);
3537   adc(final_dest_hi, dest_hi, zr);
3538 }
3539
3540 // Generate an address from (r + r1 extend offset). "size" is the
3541 // size of the operand. The result may be in rscratch2.
3542 Address MacroAssembler::offsetted_address(Register r, Register r1,
3543                                           Address::extend ext, int offset, int size) {
3544   if (offset || (ext.shift() % size != 0)) {
3545     lea(rscratch2, Address(r, r1, ext));
3546     return Address(rscratch2, offset);
3547   } else {
3548     return Address(r, r1, ext);
3549   }
3550 }
3551
3552 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3553 {
3554   assert(offset >= 0, "spill to negative address?");
3555   // Offset reachable?
3556   //   Not aligned - 9 bits signed offset
3557   //   Aligned - 12 bits unsigned offset shifted
3558   Register base = sp;
3559   if ((offset & (size-1)) && offset >= (1<<8)) {
3560     add(tmp, base, offset & ((1<<12)-1));
3561     base = tmp;
3562     offset &= -1u<<12;
3563   }
3564
3565   if (offset >= (1<<12) * size) {
3566     add(tmp, base, offset & (((1<<12)-1)<<12));
3567     base = tmp;
3568     offset &= ~(((1<<12)-1)<<12);
3569   }
3570
3571   return Address(base, offset);
3572 }
3573
3574 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3575   assert(offset >= 0, "spill to negative address?");
3576
3577   Register base = sp;
3578
3579   // An immediate offset in the range 0 to 255 which is multiplied
3580   // by the current vector or predicate register size in bytes.
3581   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3582     return Address(base, offset / sve_reg_size_in_bytes);
3583   }
3584
3585   add(tmp, base, offset);
3586   return Address(tmp);
3587 }
3588
3589 // Checks whether offset is aligned.
3590 // Returns true if it is, else false.
3591 bool MacroAssembler::merge_alignment_check(Register base,
3592                                            size_t size,
3593                                            int64_t cur_offset,
3594                                            int64_t prev_offset) const {
3595   if (AvoidUnalignedAccesses) {
3596     if (base == sp) {
3597       // Checks whether the low offset is aligned to a pair of registers.
3598       int64_t pair_mask = size * 2 - 1;
3599       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3600       return (offset & pair_mask) == 0;
3601     } else { // If base is not sp, we can't guarantee the access is aligned.
3602       return false;
3603     }
3604   } else {
3605     int64_t mask = size - 1;
3606     // Load/store pair instruction only supports element size aligned offset.
3607     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3608   }
3609 }
3610
3611 // Checks whether current and previous loads/stores can be merged.
3612 // Returns true if they can be merged, else false.
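// For example (a sketch; registers chosen arbitrarily), two adjacent
// same-size accesses off the same base such as
//   ldr w1, [sp, #8]
//   ldr w2, [sp, #12]
// can be rewritten by merge_ldst below as a single
//   ldp w1, w2, [sp, #8]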
3613 bool MacroAssembler::ldst_can_merge(Register rt,
3614                                     const Address &adr,
3615                                     size_t cur_size_in_bytes,
3616                                     bool is_store) const {
3617   address prev = pc() - NativeInstruction::instruction_size;
3618   address last = code()->last_insn();
3619
3620   if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3621     return false;
3622   }
3623
3624   if (adr.getMode() != Address::base_plus_offset || prev != last) {
3625     return false;
3626   }
3627
3628   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3629   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3630
3631   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3632   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3633
3634   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3635     return false;
3636   }
3637
3638   int64_t max_offset = 63 * prev_size_in_bytes;
3639   int64_t min_offset = -64 * prev_size_in_bytes;
3640
3641   assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
3642
3643   // Only same base can be merged.
3644   if (adr.base() != prev_ldst->base()) {
3645     return false;
3646   }
3647
3648   int64_t cur_offset = adr.offset();
3649   int64_t prev_offset = prev_ldst->offset();
3650   size_t diff = abs(cur_offset - prev_offset);
3651   if (diff != prev_size_in_bytes) {
3652     return false;
3653   }
3654
3655   // The following cases cannot be merged:
3656   //   ldr x2, [x2, #8]
3657   //   ldr x3, [x2, #16]
3658   // or:
3659   //   ldr x2, [x3, #8]
3660   //   ldr x2, [x3, #16]
3661   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3662   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3663     return false;
3664   }
3665
3666   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3667   // Offset range must be in ldp/stp instruction's range.
3668   if (low_offset > max_offset || low_offset < min_offset) {
3669     return false;
3670   }
3671
3672   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3673     return true;
3674   }
3675
3676   return false;
3677 }
3678
3679 // Merge current load/store with previous load/store into ldp/stp.
3680 void MacroAssembler::merge_ldst(Register rt,
3681                                 const Address &adr,
3682                                 size_t cur_size_in_bytes,
3683                                 bool is_store) {
3684
3685   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3686
3687   Register rt_low, rt_high;
3688   address prev = pc() - NativeInstruction::instruction_size;
3689   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3690
3691   int64_t offset;
3692
3693   if (adr.offset() < prev_ldst->offset()) {
3694     offset = adr.offset();
3695     rt_low = rt;
3696     rt_high = prev_ldst->target();
3697   } else {
3698     offset = prev_ldst->offset();
3699     rt_low = prev_ldst->target();
3700     rt_high = rt;
3701   }
3702
3703   Address adr_p = Address(prev_ldst->base(), offset);
3704   // Overwrite the previously generated binary.
3705   code_section()->set_end(prev);
3706
3707   const size_t sz = prev_ldst->size_in_bytes();
3708   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3709   if (!is_store) {
3710     BLOCK_COMMENT("merged ldr pair");
3711     if (sz == 8) {
3712       ldp(rt_low, rt_high, adr_p);
3713     } else {
3714       ldpw(rt_low, rt_high, adr_p);
3715     }
3716   } else {
3717     BLOCK_COMMENT("merged str pair");
3718     if (sz == 8) {
3719       stp(rt_low, rt_high, adr_p);
3720     } else {
3721       stpw(rt_low, rt_high, adr_p);
3722     }
3723   }
3724 }
3725
3726 /**
3727  * Multiply 64 bit by 64 bit first loop.
3728  */
3729 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3730                                            Register y, Register y_idx, Register z,
3731                                            Register carry, Register product,
3732                                            Register idx, Register kdx) {
3733   //
3734   //  jlong carry, x[], y[], z[];
3735   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3736   //    huge_128 product = y[idx] * x[xstart] + carry;
3737   //    z[kdx] = (jlong)product;
3738   //    carry  = (jlong)(product >>> 64);
3739   //  }
3740   //  z[xstart] = carry;
3741   //
3742
3743   Label L_first_loop, L_first_loop_exit;
3744   Label L_one_x, L_one_y, L_multiply;
3745
3746   subsw(xstart, xstart, 1);
3747   br(Assembler::MI, L_one_x);
3748
3749   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3750   ldr(x_xstart, Address(rscratch1));
3751   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3752
3753   bind(L_first_loop);
3754   subsw(idx, idx, 1);
3755   br(Assembler::MI, L_first_loop_exit);
3756   subsw(idx, idx, 1);
3757   br(Assembler::MI, L_one_y);
3758   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3759   ldr(y_idx, Address(rscratch1));
3760   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3761   bind(L_multiply);
3762
3763   // AArch64 has a multiply-accumulate instruction that we can't use
3764   // here because it has no way to process carries, so we have to use
3765   // separate add and adc instructions.  Bah.
3766   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3767   mul(product, x_xstart, y_idx);
3768   adds(product, product, carry);
3769   adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product
3770
3771   subw(kdx, kdx, 2);
3772   ror(product, product, 32); // back to big-endian
3773   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3774
3775   b(L_first_loop);
3776
3777   bind(L_one_y);
3778   ldrw(y_idx, Address(y, 0));
3779   b(L_multiply);
3780
3781   bind(L_one_x);
3782   ldrw(x_xstart, Address(x, 0));
3783   b(L_first_loop);
3784
3785   bind(L_first_loop_exit);
3786 }
3787
3788 /**
3789  * Multiply 128 bit by 128 bit. Unrolled inner loop.
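 * Each pass of the unrolled loop consumes four 32-bit digits of y (idx
 * drops by 4), doing two 64-bit multiplies against product_hi; the
 * leftover idx % 4 digits are handled by the tail code after the loop.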
3790 * 3791 */ 3792 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3793 Register carry, Register carry2, 3794 Register idx, Register jdx, 3795 Register yz_idx1, Register yz_idx2, 3796 Register tmp, Register tmp3, Register tmp4, 3797 Register tmp6, Register product_hi) { 3798 3799 // jlong carry, x[], y[], z[]; 3800 // int kdx = ystart+1; 3801 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3802 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3803 // jlong carry2 = (jlong)(tmp3 >>> 64); 3804 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3805 // carry = (jlong)(tmp4 >>> 64); 3806 // z[kdx+idx+1] = (jlong)tmp3; 3807 // z[kdx+idx] = (jlong)tmp4; 3808 // } 3809 // idx += 2; 3810 // if (idx > 0) { 3811 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3812 // z[kdx+idx] = (jlong)yz_idx1; 3813 // carry = (jlong)(yz_idx1 >>> 64); 3814 // } 3815 // 3816 3817 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3818 3819 lsrw(jdx, idx, 2); 3820 3821 bind(L_third_loop); 3822 3823 subsw(jdx, jdx, 1); 3824 br(Assembler::MI, L_third_loop_exit); 3825 subw(idx, idx, 4); 3826 3827 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3828 3829 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3830 3831 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3832 3833 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3834 ror(yz_idx2, yz_idx2, 32); 3835 3836 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3837 3838 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3839 umulh(tmp4, product_hi, yz_idx1); 3840 3841 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3842 ror(rscratch2, rscratch2, 32); 3843 3844 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3845 umulh(carry2, product_hi, yz_idx2); 3846 3847 // propagate sum of both multiplications into carry:tmp4:tmp3 3848 adds(tmp3, tmp3, carry); 3849 adc(tmp4, tmp4, zr); 3850 adds(tmp3, tmp3, rscratch1); 3851 adcs(tmp4, tmp4, tmp); 3852 adc(carry, carry2, zr); 3853 adds(tmp4, tmp4, rscratch2); 3854 adc(carry, carry, zr); 3855 3856 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3857 ror(tmp4, tmp4, 32); 3858 stp(tmp4, tmp3, Address(tmp6, 0)); 3859 3860 b(L_third_loop); 3861 bind (L_third_loop_exit); 3862 3863 andw (idx, idx, 0x3); 3864 cbz(idx, L_post_third_loop_done); 3865 3866 Label L_check_1; 3867 subsw(idx, idx, 2); 3868 br(Assembler::MI, L_check_1); 3869 3870 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3871 ldr(yz_idx1, Address(rscratch1, 0)); 3872 ror(yz_idx1, yz_idx1, 32); 3873 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3874 umulh(tmp4, product_hi, yz_idx1); 3875 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3876 ldr(yz_idx2, Address(rscratch1, 0)); 3877 ror(yz_idx2, yz_idx2, 32); 3878 3879 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3880 3881 ror(tmp3, tmp3, 32); 3882 str(tmp3, Address(rscratch1, 0)); 3883 3884 bind (L_check_1); 3885 3886 andw (idx, idx, 0x1); 3887 subsw(idx, idx, 1); 3888 br(Assembler::MI, L_post_third_loop_done); 3889 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3890 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3891 umulh(carry2, tmp4, product_hi); 3892 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3893 3894 add2_with_carry(carry2, tmp3, tmp4, carry); 3895 3896 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3897 
extr(carry, carry2, tmp3, 32);
3898
3899   bind(L_post_third_loop_done);
3900 }
3901
3902 /**
3903  * Code for BigInteger::multiplyToLen() intrinsic.
3904  *
3905  * r0: x
3906  * r1: xlen
3907  * r2: y
3908  * r3: ylen
3909  * r4: z
3910  * r5: zlen
3911  * r10: tmp1
3912  * r11: tmp2
3913  * r12: tmp3
3914  * r13: tmp4
3915  * r14: tmp5
3916  * r15: tmp6
3917  * r16: tmp7
3918  *
3919  */
3920 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
3921                                      Register z, Register zlen,
3922                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
3923                                      Register tmp5, Register tmp6, Register product_hi) {
3924
3925   assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
3926
3927   const Register idx = tmp1;
3928   const Register kdx = tmp2;
3929   const Register xstart = tmp3;
3930
3931   const Register y_idx = tmp4;
3932   const Register carry = tmp5;
3933   const Register product = xlen;
3934   const Register x_xstart = zlen;  // reuse register
3935
3936   // First Loop.
3937   //
3938   //  final static long LONG_MASK = 0xffffffffL;
3939   //  int xstart = xlen - 1;
3940   //  int ystart = ylen - 1;
3941   //  long carry = 0;
3942   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3943   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
3944   //    z[kdx] = (int)product;
3945   //    carry = product >>> 32;
3946   //  }
3947   //  z[xstart] = (int)carry;
3948   //
3949
3950   movw(idx, ylen);      // idx = ylen;
3951   movw(kdx, zlen);      // kdx = xlen+ylen;
3952   mov(carry, zr);       // carry = 0;
3953
3954   Label L_done;
3955
3956   movw(xstart, xlen);
3957   subsw(xstart, xstart, 1);
3958   br(Assembler::MI, L_done);
3959
3960   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
3961
3962   Label L_second_loop;
3963   cbzw(kdx, L_second_loop);
3964
3965   Label L_carry;
3966   subw(kdx, kdx, 1);
3967   cbzw(kdx, L_carry);
3968
3969   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3970   lsr(carry, carry, 32);
3971   subw(kdx, kdx, 1);
3972
3973   bind(L_carry);
3974   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3975
3976   // Second and third (nested) loops.
3977 // 3978 // for (int i = xstart-1; i >= 0; i--) { // Second loop 3979 // carry = 0; 3980 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 3981 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 3982 // (z[k] & LONG_MASK) + carry; 3983 // z[k] = (int)product; 3984 // carry = product >>> 32; 3985 // } 3986 // z[i] = (int)carry; 3987 // } 3988 // 3989 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 3990 3991 const Register jdx = tmp1; 3992 3993 bind(L_second_loop); 3994 mov(carry, zr); // carry = 0; 3995 movw(jdx, ylen); // j = ystart+1 3996 3997 subsw(xstart, xstart, 1); // i = xstart-1; 3998 br(Assembler::MI, L_done); 3999 4000 str(z, Address(pre(sp, -4 * wordSize))); 4001 4002 Label L_last_x; 4003 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4004 subsw(xstart, xstart, 1); // i = xstart-1; 4005 br(Assembler::MI, L_last_x); 4006 4007 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4008 ldr(product_hi, Address(rscratch1)); 4009 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4010 4011 Label L_third_loop_prologue; 4012 bind(L_third_loop_prologue); 4013 4014 str(ylen, Address(sp, wordSize)); 4015 stp(x, xstart, Address(sp, 2 * wordSize)); 4016 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4017 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4018 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4019 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4020 4021 addw(tmp3, xlen, 1); 4022 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4023 subsw(tmp3, tmp3, 1); 4024 br(Assembler::MI, L_done); 4025 4026 lsr(carry, carry, 32); 4027 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4028 b(L_second_loop); 4029 4030 // Next infrequent code is moved outside loops. 4031 bind(L_last_x); 4032 ldrw(product_hi, Address(x, 0)); 4033 b(L_third_loop_prologue); 4034 4035 bind(L_done); 4036 } 4037 4038 // Code for BigInteger::mulAdd intrinsic 4039 // out = r0 4040 // in = r1 4041 // offset = r2 (already out.length-offset) 4042 // len = r3 4043 // k = r4 4044 // 4045 // pseudo code from java implementation: 4046 // carry = 0; 4047 // offset = out.length-offset - 1; 4048 // for (int j=len-1; j >= 0; j--) { 4049 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4050 // out[offset--] = (int)product; 4051 // carry = product >>> 32; 4052 // } 4053 // return (int)carry; 4054 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4055 Register len, Register k) { 4056 Label LOOP, END; 4057 // pre-loop 4058 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4059 csel(out, zr, out, Assembler::EQ); 4060 br(Assembler::EQ, END); 4061 add(in, in, len, LSL, 2); // in[j+1] address 4062 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4063 mov(out, zr); // used to keep carry now 4064 BIND(LOOP); 4065 ldrw(rscratch1, Address(pre(in, -4))); 4066 madd(rscratch1, rscratch1, k, out); 4067 ldrw(rscratch2, Address(pre(offset, -4))); 4068 add(rscratch1, rscratch1, rscratch2); 4069 strw(rscratch1, Address(offset)); 4070 lsr(out, rscratch1, 32); 4071 subs(len, len, 1); 4072 br(Assembler::NE, LOOP); 4073 BIND(END); 4074 } 4075 4076 /** 4077 * Emits code to update CRC-32 with a byte value according to constants in table 4078 * 4079 * @param [in,out]crc Register containing the crc. 
4080 * @param [in]val Register containing the byte to fold into the CRC. 4081 * @param [in]table Register containing the table of crc constants. 4082 * 4083 * uint32_t crc; 4084 * val = crc_table[(val ^ crc) & 0xFF]; 4085 * crc = val ^ (crc >> 8); 4086 * 4087 */ 4088 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4089 eor(val, val, crc); 4090 andr(val, val, 0xff); 4091 ldrw(val, Address(table, val, Address::lsl(2))); 4092 eor(crc, val, crc, Assembler::LSR, 8); 4093 } 4094 4095 /** 4096 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4097 * 4098 * @param [in,out]crc Register containing the crc. 4099 * @param [in]v Register containing the 32-bit to fold into the CRC. 4100 * @param [in]table0 Register containing table 0 of crc constants. 4101 * @param [in]table1 Register containing table 1 of crc constants. 4102 * @param [in]table2 Register containing table 2 of crc constants. 4103 * @param [in]table3 Register containing table 3 of crc constants. 4104 * 4105 * uint32_t crc; 4106 * v = crc ^ v 4107 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4108 * 4109 */ 4110 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4111 Register table0, Register table1, Register table2, Register table3, 4112 bool upper) { 4113 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4114 uxtb(tmp, v); 4115 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4116 ubfx(tmp, v, 8, 8); 4117 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4118 eor(crc, crc, tmp); 4119 ubfx(tmp, v, 16, 8); 4120 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4121 eor(crc, crc, tmp); 4122 ubfx(tmp, v, 24, 8); 4123 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4124 eor(crc, crc, tmp); 4125 } 4126 4127 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4128 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4129 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4130 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4131 4132 subs(tmp0, len, 384); 4133 mvnw(crc, crc); 4134 br(Assembler::GE, CRC_by128_pre); 4135 BIND(CRC_less128); 4136 subs(len, len, 32); 4137 br(Assembler::GE, CRC_by32_loop); 4138 BIND(CRC_less32); 4139 adds(len, len, 32 - 4); 4140 br(Assembler::GE, CRC_by4_loop); 4141 adds(len, len, 4); 4142 br(Assembler::GT, CRC_by1_loop); 4143 b(L_exit); 4144 4145 BIND(CRC_by32_loop); 4146 ldp(tmp0, tmp1, Address(buf)); 4147 crc32x(crc, crc, tmp0); 4148 ldp(tmp2, tmp3, Address(buf, 16)); 4149 crc32x(crc, crc, tmp1); 4150 add(buf, buf, 32); 4151 crc32x(crc, crc, tmp2); 4152 subs(len, len, 32); 4153 crc32x(crc, crc, tmp3); 4154 br(Assembler::GE, CRC_by32_loop); 4155 cmn(len, (u1)32); 4156 br(Assembler::NE, CRC_less32); 4157 b(L_exit); 4158 4159 BIND(CRC_by4_loop); 4160 ldrw(tmp0, Address(post(buf, 4))); 4161 subs(len, len, 4); 4162 crc32w(crc, crc, tmp0); 4163 br(Assembler::GE, CRC_by4_loop); 4164 adds(len, len, 4); 4165 br(Assembler::LE, L_exit); 4166 BIND(CRC_by1_loop); 4167 ldrb(tmp0, Address(post(buf, 1))); 4168 subs(len, len, 1); 4169 crc32b(crc, crc, tmp0); 4170 br(Assembler::GT, CRC_by1_loop); 4171 b(L_exit); 4172 4173 BIND(CRC_by128_pre); 4174 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4175 4*256*sizeof(juint) + 8*sizeof(juint)); 4176 mov(crc, 0); 4177 crc32x(crc, crc, tmp0); 4178 crc32x(crc, crc, tmp1); 4179 4180 cbnz(len, CRC_less128); 4181 4182 BIND(L_exit); 
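  // crc was bit-inverted on entry (mvnw above); invert it back before returning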
4183 mvnw(crc, crc); 4184 } 4185 4186 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4187 Register len, Register tmp0, Register tmp1, Register tmp2, 4188 Register tmp3) { 4189 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4190 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4191 4192 mvnw(crc, crc); 4193 4194 subs(len, len, 128); 4195 br(Assembler::GE, CRC_by64_pre); 4196 BIND(CRC_less64); 4197 adds(len, len, 128-32); 4198 br(Assembler::GE, CRC_by32_loop); 4199 BIND(CRC_less32); 4200 adds(len, len, 32-4); 4201 br(Assembler::GE, CRC_by4_loop); 4202 adds(len, len, 4); 4203 br(Assembler::GT, CRC_by1_loop); 4204 b(L_exit); 4205 4206 BIND(CRC_by32_loop); 4207 ldp(tmp0, tmp1, Address(post(buf, 16))); 4208 subs(len, len, 32); 4209 crc32x(crc, crc, tmp0); 4210 ldr(tmp2, Address(post(buf, 8))); 4211 crc32x(crc, crc, tmp1); 4212 ldr(tmp3, Address(post(buf, 8))); 4213 crc32x(crc, crc, tmp2); 4214 crc32x(crc, crc, tmp3); 4215 br(Assembler::GE, CRC_by32_loop); 4216 cmn(len, (u1)32); 4217 br(Assembler::NE, CRC_less32); 4218 b(L_exit); 4219 4220 BIND(CRC_by4_loop); 4221 ldrw(tmp0, Address(post(buf, 4))); 4222 subs(len, len, 4); 4223 crc32w(crc, crc, tmp0); 4224 br(Assembler::GE, CRC_by4_loop); 4225 adds(len, len, 4); 4226 br(Assembler::LE, L_exit); 4227 BIND(CRC_by1_loop); 4228 ldrb(tmp0, Address(post(buf, 1))); 4229 subs(len, len, 1); 4230 crc32b(crc, crc, tmp0); 4231 br(Assembler::GT, CRC_by1_loop); 4232 b(L_exit); 4233 4234 BIND(CRC_by64_pre); 4235 sub(buf, buf, 8); 4236 ldp(tmp0, tmp1, Address(buf, 8)); 4237 crc32x(crc, crc, tmp0); 4238 ldr(tmp2, Address(buf, 24)); 4239 crc32x(crc, crc, tmp1); 4240 ldr(tmp3, Address(buf, 32)); 4241 crc32x(crc, crc, tmp2); 4242 ldr(tmp0, Address(buf, 40)); 4243 crc32x(crc, crc, tmp3); 4244 ldr(tmp1, Address(buf, 48)); 4245 crc32x(crc, crc, tmp0); 4246 ldr(tmp2, Address(buf, 56)); 4247 crc32x(crc, crc, tmp1); 4248 ldr(tmp3, Address(pre(buf, 64))); 4249 4250 b(CRC_by64_loop); 4251 4252 align(CodeEntryAlignment); 4253 BIND(CRC_by64_loop); 4254 subs(len, len, 64); 4255 crc32x(crc, crc, tmp2); 4256 ldr(tmp0, Address(buf, 8)); 4257 crc32x(crc, crc, tmp3); 4258 ldr(tmp1, Address(buf, 16)); 4259 crc32x(crc, crc, tmp0); 4260 ldr(tmp2, Address(buf, 24)); 4261 crc32x(crc, crc, tmp1); 4262 ldr(tmp3, Address(buf, 32)); 4263 crc32x(crc, crc, tmp2); 4264 ldr(tmp0, Address(buf, 40)); 4265 crc32x(crc, crc, tmp3); 4266 ldr(tmp1, Address(buf, 48)); 4267 crc32x(crc, crc, tmp0); 4268 ldr(tmp2, Address(buf, 56)); 4269 crc32x(crc, crc, tmp1); 4270 ldr(tmp3, Address(pre(buf, 64))); 4271 br(Assembler::GE, CRC_by64_loop); 4272 4273 // post-loop 4274 crc32x(crc, crc, tmp2); 4275 crc32x(crc, crc, tmp3); 4276 4277 sub(len, len, 64); 4278 add(buf, buf, 8); 4279 cmn(len, (u1)128); 4280 br(Assembler::NE, CRC_less64); 4281 BIND(L_exit); 4282 mvnw(crc, crc); 4283 } 4284 4285 /** 4286 * @param crc register containing existing CRC (32-bit) 4287 * @param buf register pointing to input byte buffer (byte*) 4288 * @param len register containing number of bytes 4289 * @param table register that will contain address of CRC table 4290 * @param tmp scratch register 4291 */ 4292 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4293 Register table0, Register table1, Register table2, Register table3, 4294 Register tmp, Register tmp2, Register tmp3) { 4295 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4296 4297 if (UseCryptoPmullForCRC32) { 4298 
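    // The CRC table registers are not needed on this path; they are handed
    // to the pmull kernel to serve as its scratch registers (tmp0..tmp3).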
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4299 return; 4300 } 4301 4302 if (UseCRC32) { 4303 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4304 return; 4305 } 4306 4307 mvnw(crc, crc); 4308 4309 { 4310 uint64_t offset; 4311 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4312 add(table0, table0, offset); 4313 } 4314 add(table1, table0, 1*256*sizeof(juint)); 4315 add(table2, table0, 2*256*sizeof(juint)); 4316 add(table3, table0, 3*256*sizeof(juint)); 4317 4318 { // Neon code start 4319 cmp(len, (u1)64); 4320 br(Assembler::LT, L_by16); 4321 eor(v16, T16B, v16, v16); 4322 4323 Label L_fold; 4324 4325 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4326 4327 ld1(v0, v1, T2D, post(buf, 32)); 4328 ld1r(v4, T2D, post(tmp, 8)); 4329 ld1r(v5, T2D, post(tmp, 8)); 4330 ld1r(v6, T2D, post(tmp, 8)); 4331 ld1r(v7, T2D, post(tmp, 8)); 4332 mov(v16, S, 0, crc); 4333 4334 eor(v0, T16B, v0, v16); 4335 sub(len, len, 64); 4336 4337 BIND(L_fold); 4338 pmull(v22, T8H, v0, v5, T8B); 4339 pmull(v20, T8H, v0, v7, T8B); 4340 pmull(v23, T8H, v0, v4, T8B); 4341 pmull(v21, T8H, v0, v6, T8B); 4342 4343 pmull2(v18, T8H, v0, v5, T16B); 4344 pmull2(v16, T8H, v0, v7, T16B); 4345 pmull2(v19, T8H, v0, v4, T16B); 4346 pmull2(v17, T8H, v0, v6, T16B); 4347 4348 uzp1(v24, T8H, v20, v22); 4349 uzp2(v25, T8H, v20, v22); 4350 eor(v20, T16B, v24, v25); 4351 4352 uzp1(v26, T8H, v16, v18); 4353 uzp2(v27, T8H, v16, v18); 4354 eor(v16, T16B, v26, v27); 4355 4356 ushll2(v22, T4S, v20, T8H, 8); 4357 ushll(v20, T4S, v20, T4H, 8); 4358 4359 ushll2(v18, T4S, v16, T8H, 8); 4360 ushll(v16, T4S, v16, T4H, 8); 4361 4362 eor(v22, T16B, v23, v22); 4363 eor(v18, T16B, v19, v18); 4364 eor(v20, T16B, v21, v20); 4365 eor(v16, T16B, v17, v16); 4366 4367 uzp1(v17, T2D, v16, v20); 4368 uzp2(v21, T2D, v16, v20); 4369 eor(v17, T16B, v17, v21); 4370 4371 ushll2(v20, T2D, v17, T4S, 16); 4372 ushll(v16, T2D, v17, T2S, 16); 4373 4374 eor(v20, T16B, v20, v22); 4375 eor(v16, T16B, v16, v18); 4376 4377 uzp1(v17, T2D, v20, v16); 4378 uzp2(v21, T2D, v20, v16); 4379 eor(v28, T16B, v17, v21); 4380 4381 pmull(v22, T8H, v1, v5, T8B); 4382 pmull(v20, T8H, v1, v7, T8B); 4383 pmull(v23, T8H, v1, v4, T8B); 4384 pmull(v21, T8H, v1, v6, T8B); 4385 4386 pmull2(v18, T8H, v1, v5, T16B); 4387 pmull2(v16, T8H, v1, v7, T16B); 4388 pmull2(v19, T8H, v1, v4, T16B); 4389 pmull2(v17, T8H, v1, v6, T16B); 4390 4391 ld1(v0, v1, T2D, post(buf, 32)); 4392 4393 uzp1(v24, T8H, v20, v22); 4394 uzp2(v25, T8H, v20, v22); 4395 eor(v20, T16B, v24, v25); 4396 4397 uzp1(v26, T8H, v16, v18); 4398 uzp2(v27, T8H, v16, v18); 4399 eor(v16, T16B, v26, v27); 4400 4401 ushll2(v22, T4S, v20, T8H, 8); 4402 ushll(v20, T4S, v20, T4H, 8); 4403 4404 ushll2(v18, T4S, v16, T8H, 8); 4405 ushll(v16, T4S, v16, T4H, 8); 4406 4407 eor(v22, T16B, v23, v22); 4408 eor(v18, T16B, v19, v18); 4409 eor(v20, T16B, v21, v20); 4410 eor(v16, T16B, v17, v16); 4411 4412 uzp1(v17, T2D, v16, v20); 4413 uzp2(v21, T2D, v16, v20); 4414 eor(v16, T16B, v17, v21); 4415 4416 ushll2(v20, T2D, v16, T4S, 16); 4417 ushll(v16, T2D, v16, T2S, 16); 4418 4419 eor(v20, T16B, v22, v20); 4420 eor(v16, T16B, v16, v18); 4421 4422 uzp1(v17, T2D, v20, v16); 4423 uzp2(v21, T2D, v20, v16); 4424 eor(v20, T16B, v17, v21); 4425 4426 shl(v16, T2D, v28, 1); 4427 shl(v17, T2D, v20, 1); 4428 4429 eor(v0, T16B, v0, v16); 4430 eor(v1, T16B, v1, v17); 4431 4432 subs(len, len, 32); 4433 br(Assembler::GE, L_fold); 4434 4435 mov(crc, 0); 4436 mov(tmp, v0, D, 0); 
4437 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4438 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4439 mov(tmp, v0, D, 1); 4440 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4441 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4442 mov(tmp, v1, D, 0); 4443 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4444 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4445 mov(tmp, v1, D, 1); 4446 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4447 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4448 4449 add(len, len, 32); 4450 } // Neon code end 4451 4452 BIND(L_by16); 4453 subs(len, len, 16); 4454 br(Assembler::GE, L_by16_loop); 4455 adds(len, len, 16-4); 4456 br(Assembler::GE, L_by4_loop); 4457 adds(len, len, 4); 4458 br(Assembler::GT, L_by1_loop); 4459 b(L_exit); 4460 4461 BIND(L_by4_loop); 4462 ldrw(tmp, Address(post(buf, 4))); 4463 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4464 subs(len, len, 4); 4465 br(Assembler::GE, L_by4_loop); 4466 adds(len, len, 4); 4467 br(Assembler::LE, L_exit); 4468 BIND(L_by1_loop); 4469 subs(len, len, 1); 4470 ldrb(tmp, Address(post(buf, 1))); 4471 update_byte_crc32(crc, tmp, table0); 4472 br(Assembler::GT, L_by1_loop); 4473 b(L_exit); 4474 4475 align(CodeEntryAlignment); 4476 BIND(L_by16_loop); 4477 subs(len, len, 16); 4478 ldp(tmp, tmp3, Address(post(buf, 16))); 4479 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4480 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4481 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4482 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4483 br(Assembler::GE, L_by16_loop); 4484 adds(len, len, 16-4); 4485 br(Assembler::GE, L_by4_loop); 4486 adds(len, len, 4); 4487 br(Assembler::GT, L_by1_loop); 4488 BIND(L_exit); 4489 mvnw(crc, crc); 4490 } 4491 4492 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4493 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4494 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4495 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4496 4497 subs(tmp0, len, 384); 4498 br(Assembler::GE, CRC_by128_pre); 4499 BIND(CRC_less128); 4500 subs(len, len, 32); 4501 br(Assembler::GE, CRC_by32_loop); 4502 BIND(CRC_less32); 4503 adds(len, len, 32 - 4); 4504 br(Assembler::GE, CRC_by4_loop); 4505 adds(len, len, 4); 4506 br(Assembler::GT, CRC_by1_loop); 4507 b(L_exit); 4508 4509 BIND(CRC_by32_loop); 4510 ldp(tmp0, tmp1, Address(buf)); 4511 crc32cx(crc, crc, tmp0); 4512 ldr(tmp2, Address(buf, 16)); 4513 crc32cx(crc, crc, tmp1); 4514 ldr(tmp3, Address(buf, 24)); 4515 crc32cx(crc, crc, tmp2); 4516 add(buf, buf, 32); 4517 subs(len, len, 32); 4518 crc32cx(crc, crc, tmp3); 4519 br(Assembler::GE, CRC_by32_loop); 4520 cmn(len, (u1)32); 4521 br(Assembler::NE, CRC_less32); 4522 b(L_exit); 4523 4524 BIND(CRC_by4_loop); 4525 ldrw(tmp0, Address(post(buf, 4))); 4526 subs(len, len, 4); 4527 crc32cw(crc, crc, tmp0); 4528 br(Assembler::GE, CRC_by4_loop); 4529 adds(len, len, 4); 4530 br(Assembler::LE, L_exit); 4531 BIND(CRC_by1_loop); 4532 ldrb(tmp0, Address(post(buf, 1))); 4533 subs(len, len, 1); 4534 crc32cb(crc, crc, tmp0); 4535 br(Assembler::GT, CRC_by1_loop); 4536 b(L_exit); 4537 
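  // Folds 128 bytes per iteration with pmull, then feeds the folded
  // 128-bit result (returned in tmp0:tmp1) back through the scalar
  // crc32c instructions.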
4538 BIND(CRC_by128_pre); 4539 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4540 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4541 mov(crc, 0); 4542 crc32cx(crc, crc, tmp0); 4543 crc32cx(crc, crc, tmp1); 4544 4545 cbnz(len, CRC_less128); 4546 4547 BIND(L_exit); 4548 } 4549 4550 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4551 Register len, Register tmp0, Register tmp1, Register tmp2, 4552 Register tmp3) { 4553 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4554 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4555 4556 subs(len, len, 128); 4557 br(Assembler::GE, CRC_by64_pre); 4558 BIND(CRC_less64); 4559 adds(len, len, 128-32); 4560 br(Assembler::GE, CRC_by32_loop); 4561 BIND(CRC_less32); 4562 adds(len, len, 32-4); 4563 br(Assembler::GE, CRC_by4_loop); 4564 adds(len, len, 4); 4565 br(Assembler::GT, CRC_by1_loop); 4566 b(L_exit); 4567 4568 BIND(CRC_by32_loop); 4569 ldp(tmp0, tmp1, Address(post(buf, 16))); 4570 subs(len, len, 32); 4571 crc32cx(crc, crc, tmp0); 4572 ldr(tmp2, Address(post(buf, 8))); 4573 crc32cx(crc, crc, tmp1); 4574 ldr(tmp3, Address(post(buf, 8))); 4575 crc32cx(crc, crc, tmp2); 4576 crc32cx(crc, crc, tmp3); 4577 br(Assembler::GE, CRC_by32_loop); 4578 cmn(len, (u1)32); 4579 br(Assembler::NE, CRC_less32); 4580 b(L_exit); 4581 4582 BIND(CRC_by4_loop); 4583 ldrw(tmp0, Address(post(buf, 4))); 4584 subs(len, len, 4); 4585 crc32cw(crc, crc, tmp0); 4586 br(Assembler::GE, CRC_by4_loop); 4587 adds(len, len, 4); 4588 br(Assembler::LE, L_exit); 4589 BIND(CRC_by1_loop); 4590 ldrb(tmp0, Address(post(buf, 1))); 4591 subs(len, len, 1); 4592 crc32cb(crc, crc, tmp0); 4593 br(Assembler::GT, CRC_by1_loop); 4594 b(L_exit); 4595 4596 BIND(CRC_by64_pre); 4597 sub(buf, buf, 8); 4598 ldp(tmp0, tmp1, Address(buf, 8)); 4599 crc32cx(crc, crc, tmp0); 4600 ldr(tmp2, Address(buf, 24)); 4601 crc32cx(crc, crc, tmp1); 4602 ldr(tmp3, Address(buf, 32)); 4603 crc32cx(crc, crc, tmp2); 4604 ldr(tmp0, Address(buf, 40)); 4605 crc32cx(crc, crc, tmp3); 4606 ldr(tmp1, Address(buf, 48)); 4607 crc32cx(crc, crc, tmp0); 4608 ldr(tmp2, Address(buf, 56)); 4609 crc32cx(crc, crc, tmp1); 4610 ldr(tmp3, Address(pre(buf, 64))); 4611 4612 b(CRC_by64_loop); 4613 4614 align(CodeEntryAlignment); 4615 BIND(CRC_by64_loop); 4616 subs(len, len, 64); 4617 crc32cx(crc, crc, tmp2); 4618 ldr(tmp0, Address(buf, 8)); 4619 crc32cx(crc, crc, tmp3); 4620 ldr(tmp1, Address(buf, 16)); 4621 crc32cx(crc, crc, tmp0); 4622 ldr(tmp2, Address(buf, 24)); 4623 crc32cx(crc, crc, tmp1); 4624 ldr(tmp3, Address(buf, 32)); 4625 crc32cx(crc, crc, tmp2); 4626 ldr(tmp0, Address(buf, 40)); 4627 crc32cx(crc, crc, tmp3); 4628 ldr(tmp1, Address(buf, 48)); 4629 crc32cx(crc, crc, tmp0); 4630 ldr(tmp2, Address(buf, 56)); 4631 crc32cx(crc, crc, tmp1); 4632 ldr(tmp3, Address(pre(buf, 64))); 4633 br(Assembler::GE, CRC_by64_loop); 4634 4635 // post-loop 4636 crc32cx(crc, crc, tmp2); 4637 crc32cx(crc, crc, tmp3); 4638 4639 sub(len, len, 64); 4640 add(buf, buf, 8); 4641 cmn(len, (u1)128); 4642 br(Assembler::NE, CRC_less64); 4643 BIND(L_exit); 4644 } 4645 4646 /** 4647 * @param crc register containing existing CRC (32-bit) 4648 * @param buf register pointing to input byte buffer (byte*) 4649 * @param len register containing number of bytes 4650 * @param table register that will contain address of CRC table 4651 * @param tmp scratch register 4652 */ 4653 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4654 Register table0, Register table1, Register table2, Register table3, 4655 Register tmp, Register tmp2, Register tmp3) { 4656 if (UseCryptoPmullForCRC32) { 4657 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4658 } else { 4659 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4660 } 4661 } 4662 4663 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4664 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4665 Label CRC_by128_loop; 4666 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4667 4668 sub(len, len, 256); 4669 Register table = tmp0; 4670 { 4671 uint64_t offset; 4672 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4673 add(table, table, offset); 4674 } 4675 add(table, table, table_offset); 4676 4677 // Registers v0..v7 are used as data registers. 4678 // Registers v16..v31 are used as tmp registers. 4679 sub(buf, buf, 0x10); 4680 ldrq(v0, Address(buf, 0x10)); 4681 ldrq(v1, Address(buf, 0x20)); 4682 ldrq(v2, Address(buf, 0x30)); 4683 ldrq(v3, Address(buf, 0x40)); 4684 ldrq(v4, Address(buf, 0x50)); 4685 ldrq(v5, Address(buf, 0x60)); 4686 ldrq(v6, Address(buf, 0x70)); 4687 ldrq(v7, Address(pre(buf, 0x80))); 4688 4689 movi(v31, T4S, 0); 4690 mov(v31, S, 0, crc); 4691 eor(v0, T16B, v0, v31); 4692 4693 // Register v16 contains constants from the crc table. 4694 ldrq(v16, Address(table)); 4695 b(CRC_by128_loop); 4696 4697 align(OptoLoopAlignment); 4698 BIND(CRC_by128_loop); 4699 pmull (v17, T1Q, v0, v16, T1D); 4700 pmull2(v18, T1Q, v0, v16, T2D); 4701 ldrq(v0, Address(buf, 0x10)); 4702 eor3(v0, T16B, v17, v18, v0); 4703 4704 pmull (v19, T1Q, v1, v16, T1D); 4705 pmull2(v20, T1Q, v1, v16, T2D); 4706 ldrq(v1, Address(buf, 0x20)); 4707 eor3(v1, T16B, v19, v20, v1); 4708 4709 pmull (v21, T1Q, v2, v16, T1D); 4710 pmull2(v22, T1Q, v2, v16, T2D); 4711 ldrq(v2, Address(buf, 0x30)); 4712 eor3(v2, T16B, v21, v22, v2); 4713 4714 pmull (v23, T1Q, v3, v16, T1D); 4715 pmull2(v24, T1Q, v3, v16, T2D); 4716 ldrq(v3, Address(buf, 0x40)); 4717 eor3(v3, T16B, v23, v24, v3); 4718 4719 pmull (v25, T1Q, v4, v16, T1D); 4720 pmull2(v26, T1Q, v4, v16, T2D); 4721 ldrq(v4, Address(buf, 0x50)); 4722 eor3(v4, T16B, v25, v26, v4); 4723 4724 pmull (v27, T1Q, v5, v16, T1D); 4725 pmull2(v28, T1Q, v5, v16, T2D); 4726 ldrq(v5, Address(buf, 0x60)); 4727 eor3(v5, T16B, v27, v28, v5); 4728 4729 pmull (v29, T1Q, v6, v16, T1D); 4730 pmull2(v30, T1Q, v6, v16, T2D); 4731 ldrq(v6, Address(buf, 0x70)); 4732 eor3(v6, T16B, v29, v30, v6); 4733 4734 // Reuse registers v23, v24. 4735 // Using them won't block the first instruction of the next iteration. 4736 pmull (v23, T1Q, v7, v16, T1D); 4737 pmull2(v24, T1Q, v7, v16, T2D); 4738 ldrq(v7, Address(pre(buf, 0x80))); 4739 eor3(v7, T16B, v23, v24, v7); 4740 4741 subs(len, len, 0x80); 4742 br(Assembler::GE, CRC_by128_loop); 4743 4744 // fold into 512 bits 4745 // Use v31 for constants because v16 can be still in use. 
4746 ldrq(v31, Address(table, 0x10)); 4747 4748 pmull (v17, T1Q, v0, v31, T1D); 4749 pmull2(v18, T1Q, v0, v31, T2D); 4750 eor3(v0, T16B, v17, v18, v4); 4751 4752 pmull (v19, T1Q, v1, v31, T1D); 4753 pmull2(v20, T1Q, v1, v31, T2D); 4754 eor3(v1, T16B, v19, v20, v5); 4755 4756 pmull (v21, T1Q, v2, v31, T1D); 4757 pmull2(v22, T1Q, v2, v31, T2D); 4758 eor3(v2, T16B, v21, v22, v6); 4759 4760 pmull (v23, T1Q, v3, v31, T1D); 4761 pmull2(v24, T1Q, v3, v31, T2D); 4762 eor3(v3, T16B, v23, v24, v7); 4763 4764 // fold into 128 bits 4765 // Use v17 for constants because v31 can be still in use. 4766 ldrq(v17, Address(table, 0x20)); 4767 pmull (v25, T1Q, v0, v17, T1D); 4768 pmull2(v26, T1Q, v0, v17, T2D); 4769 eor3(v3, T16B, v3, v25, v26); 4770 4771 // Use v18 for constants because v17 can be still in use. 4772 ldrq(v18, Address(table, 0x30)); 4773 pmull (v27, T1Q, v1, v18, T1D); 4774 pmull2(v28, T1Q, v1, v18, T2D); 4775 eor3(v3, T16B, v3, v27, v28); 4776 4777 // Use v19 for constants because v18 can be still in use. 4778 ldrq(v19, Address(table, 0x40)); 4779 pmull (v29, T1Q, v2, v19, T1D); 4780 pmull2(v30, T1Q, v2, v19, T2D); 4781 eor3(v0, T16B, v3, v29, v30); 4782 4783 add(len, len, 0x80); 4784 add(buf, buf, 0x10); 4785 4786 mov(tmp0, v0, D, 0); 4787 mov(tmp1, v0, D, 1); 4788 } 4789 4790 SkipIfEqual::SkipIfEqual( 4791 MacroAssembler* masm, const bool* flag_addr, bool value) { 4792 _masm = masm; 4793 uint64_t offset; 4794 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4795 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4796 if (value) { 4797 _masm->cbnzw(rscratch1, _label); 4798 } else { 4799 _masm->cbzw(rscratch1, _label); 4800 } 4801 } 4802 4803 SkipIfEqual::~SkipIfEqual() { 4804 _masm->bind(_label); 4805 } 4806 4807 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4808 Address adr; 4809 switch(dst.getMode()) { 4810 case Address::base_plus_offset: 4811 // This is the expected mode, although we allow all the other 4812 // forms below. 
4813 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4814 break; 4815 default: 4816 lea(rscratch2, dst); 4817 adr = Address(rscratch2); 4818 break; 4819 } 4820 ldr(rscratch1, adr); 4821 add(rscratch1, rscratch1, src); 4822 str(rscratch1, adr); 4823 } 4824 4825 void MacroAssembler::cmpptr(Register src1, Address src2) { 4826 uint64_t offset; 4827 adrp(rscratch1, src2, offset); 4828 ldr(rscratch1, Address(rscratch1, offset)); 4829 cmp(src1, rscratch1); 4830 } 4831 4832 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4833 cmp(obj1, obj2); 4834 } 4835 4836 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4837 load_method_holder(rresult, rmethod); 4838 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4839 } 4840 4841 void MacroAssembler::load_method_holder(Register holder, Register method) { 4842 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4843 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4844 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4845 } 4846 4847 void MacroAssembler::load_klass(Register dst, Register src) { 4848 if (UseCompressedClassPointers) { 4849 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4850 decode_klass_not_null(dst); 4851 } else { 4852 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4853 } 4854 } 4855 4856 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 4857 if (RestoreMXCSROnJNICalls) { 4858 Label OK; 4859 get_fpcr(tmp1); 4860 mov(tmp2, tmp1); 4861 // Set FPCR to the state we need. We do want Round to Nearest. We 4862 // don't want non-IEEE rounding modes or floating-point traps. 4863 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 4864 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 4865 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 4866 eor(tmp2, tmp1, tmp2); 4867 cbz(tmp2, OK); // Only reset FPCR if it's wrong 4868 set_fpcr(tmp1); 4869 bind(OK); 4870 } 4871 } 4872 4873 // ((OopHandle)result).resolve(); 4874 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4875 // OopHandle::resolve is an indirection. 4876 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4877 } 4878 4879 // ((WeakHandle)result).resolve(); 4880 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4881 assert_different_registers(result, tmp1, tmp2); 4882 Label resolved; 4883 4884 // A null weak handle resolves to null. 4885 cbz(result, resolved); 4886 4887 // Only 64 bit platforms support GCs that require a tmp register 4888 // WeakHandle::resolve is an indirection like jweak. 
4889   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
4890                  result, Address(result), tmp1, tmp2);
4891   bind(resolved);
4892 }
4893
4894 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
4895   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  // n.b. the loads below go via rmethod, so the method argument is expected to alias rmethod
4896   ldr(dst, Address(rmethod, Method::const_offset()));
4897   ldr(dst, Address(dst, ConstMethod::constants_offset()));
4898   ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
4899   ldr(dst, Address(dst, mirror_offset));
4900   resolve_oop_handle(dst, tmp1, tmp2);
4901 }
4902
4903 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4904   if (UseCompressedClassPointers) {
4905     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4906     if (CompressedKlassPointers::base() == nullptr) {
4907       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4908       return;
4909     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4910                && CompressedKlassPointers::shift() == 0) {
4911       // Only the bottom 32 bits matter
4912       cmpw(trial_klass, tmp);
4913       return;
4914     }
4915     decode_klass_not_null(tmp);
4916   } else {
4917     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4918   }
4919   cmp(trial_klass, tmp);
4920 }
4921
4922 void MacroAssembler::store_klass(Register dst, Register src) {
4923   // FIXME: Should this be a store release? Concurrent GCs assume the
4924   // klass length is valid if the klass field is not null.
4925   if (UseCompressedClassPointers) {
4926     encode_klass_not_null(src);
4927     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4928   } else {
4929     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4930   }
4931 }
4932
4933 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4934   if (UseCompressedClassPointers) {
4935     // Store to klass gap in destination
4936     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4937   }
4938 }
4939
4940 // Algorithm must match CompressedOops::encode.
4941 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4942 #ifdef ASSERT
4943   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4944 #endif
4945   verify_oop_msg(s, "broken oop in encode_heap_oop");
4946   if (CompressedOops::base() == nullptr) {
4947     if (CompressedOops::shift() != 0) {
4948       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4949       lsr(d, s, LogMinObjAlignmentInBytes);
4950     } else {
4951       mov(d, s);
4952     }
4953   } else {
4954     subs(d, s, rheapbase);
4955     csel(d, d, zr, Assembler::HS);
4956     lsr(d, d, LogMinObjAlignmentInBytes);
4957
4958     /* Old algorithm: is this any worse?
4959 Label nonnull; 4960 cbnz(r, nonnull); 4961 sub(r, r, rheapbase); 4962 bind(nonnull); 4963 lsr(r, r, LogMinObjAlignmentInBytes); 4964 */ 4965 } 4966 } 4967 4968 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4969 #ifdef ASSERT 4970 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4971 if (CheckCompressedOops) { 4972 Label ok; 4973 cbnz(r, ok); 4974 stop("null oop passed to encode_heap_oop_not_null"); 4975 bind(ok); 4976 } 4977 #endif 4978 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 4979 if (CompressedOops::base() != nullptr) { 4980 sub(r, r, rheapbase); 4981 } 4982 if (CompressedOops::shift() != 0) { 4983 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4984 lsr(r, r, LogMinObjAlignmentInBytes); 4985 } 4986 } 4987 4988 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 4989 #ifdef ASSERT 4990 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 4991 if (CheckCompressedOops) { 4992 Label ok; 4993 cbnz(src, ok); 4994 stop("null oop passed to encode_heap_oop_not_null2"); 4995 bind(ok); 4996 } 4997 #endif 4998 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 4999 5000 Register data = src; 5001 if (CompressedOops::base() != nullptr) { 5002 sub(dst, src, rheapbase); 5003 data = dst; 5004 } 5005 if (CompressedOops::shift() != 0) { 5006 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5007 lsr(dst, data, LogMinObjAlignmentInBytes); 5008 data = dst; 5009 } 5010 if (data == src) 5011 mov(dst, src); 5012 } 5013 5014 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5015 #ifdef ASSERT 5016 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5017 #endif 5018 if (CompressedOops::base() == nullptr) { 5019 if (CompressedOops::shift() != 0 || d != s) { 5020 lsl(d, s, CompressedOops::shift()); 5021 } 5022 } else { 5023 Label done; 5024 if (d != s) 5025 mov(d, s); 5026 cbz(s, done); 5027 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5028 bind(done); 5029 } 5030 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5031 } 5032 5033 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5034 assert (UseCompressedOops, "should only be used for compressed headers"); 5035 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5036 // Cannot assert, unverified entry point counts instructions (see .ad file) 5037 // vtableStubs also counts instructions in pd_code_size_limit. 5038 // Also do not verify_oop as this is called by verify_oop. 5039 if (CompressedOops::shift() != 0) { 5040 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5041 if (CompressedOops::base() != nullptr) { 5042 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5043 } else { 5044 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5045 } 5046 } else { 5047 assert (CompressedOops::base() == nullptr, "sanity"); 5048 } 5049 } 5050 5051 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5052 assert (UseCompressedOops, "should only be used for compressed headers"); 5053 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5054 // Cannot assert, unverified entry point counts instructions (see .ad file) 5055 // vtableStubs also counts instructions in pd_code_size_limit. 5056 // Also do not verify_oop as this is called by verify_oop. 
5057 if (CompressedOops::shift() != 0) { 5058 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5059 if (CompressedOops::base() != nullptr) { 5060 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5061 } else { 5062 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5063 } 5064 } else { 5065 assert (CompressedOops::base() == nullptr, "sanity"); 5066 if (dst != src) { 5067 mov(dst, src); 5068 } 5069 } 5070 } 5071 5072 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5073 5074 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5075 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5076 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5077 5078 if (_klass_decode_mode != KlassDecodeNone) { 5079 return _klass_decode_mode; 5080 } 5081 5082 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 5083 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 5084 5085 if (CompressedKlassPointers::base() == nullptr) { 5086 return (_klass_decode_mode = KlassDecodeZero); 5087 } 5088 5089 if (operand_valid_for_logical_immediate( 5090 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 5091 const uint64_t range_mask = 5092 (1ULL << log2i(CompressedKlassPointers::range())) - 1; 5093 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 5094 return (_klass_decode_mode = KlassDecodeXor); 5095 } 5096 } 5097 5098 const uint64_t shifted_base = 5099 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5100 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 5101 "compressed class base bad alignment"); 5102 5103 return (_klass_decode_mode = KlassDecodeMovk); 5104 } 5105 5106 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5107 switch (klass_decode_mode()) { 5108 case KlassDecodeZero: 5109 if (CompressedKlassPointers::shift() != 0) { 5110 lsr(dst, src, LogKlassAlignmentInBytes); 5111 } else { 5112 if (dst != src) mov(dst, src); 5113 } 5114 break; 5115 5116 case KlassDecodeXor: 5117 if (CompressedKlassPointers::shift() != 0) { 5118 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5119 lsr(dst, dst, LogKlassAlignmentInBytes); 5120 } else { 5121 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5122 } 5123 break; 5124 5125 case KlassDecodeMovk: 5126 if (CompressedKlassPointers::shift() != 0) { 5127 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 5128 } else { 5129 movw(dst, src); 5130 } 5131 break; 5132 5133 case KlassDecodeNone: 5134 ShouldNotReachHere(); 5135 break; 5136 } 5137 } 5138 5139 void MacroAssembler::encode_klass_not_null(Register r) { 5140 encode_klass_not_null(r, r); 5141 } 5142 5143 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5144 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5145 5146 switch (klass_decode_mode()) { 5147 case KlassDecodeZero: 5148 if (CompressedKlassPointers::shift() != 0) { 5149 lsl(dst, src, LogKlassAlignmentInBytes); 5150 } else { 5151 if (dst != src) mov(dst, src); 5152 } 5153 break; 5154 5155 case KlassDecodeXor: 5156 if (CompressedKlassPointers::shift() != 0) { 5157 lsl(dst, src, LogKlassAlignmentInBytes); 5158 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5159 } else { 5160 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5161 } 5162 break; 5163 5164 case KlassDecodeMovk: { 5165 const uint64_t shifted_base = 5166 
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5167 5168 if (dst != src) movw(dst, src); 5169 movk(dst, shifted_base >> 32, 32); 5170 5171 if (CompressedKlassPointers::shift() != 0) { 5172 lsl(dst, dst, LogKlassAlignmentInBytes); 5173 } 5174 5175 break; 5176 } 5177 5178 case KlassDecodeNone: 5179 ShouldNotReachHere(); 5180 break; 5181 } 5182 } 5183 5184 void MacroAssembler::decode_klass_not_null(Register r) { 5185 decode_klass_not_null(r, r); 5186 } 5187 5188 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5189 #ifdef ASSERT 5190 { 5191 ThreadInVMfromUnknown tiv; 5192 assert (UseCompressedOops, "should only be used for compressed oops"); 5193 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5194 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5195 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5196 } 5197 #endif 5198 int oop_index = oop_recorder()->find_index(obj); 5199 InstructionMark im(this); 5200 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5201 code_section()->relocate(inst_mark(), rspec); 5202 movz(dst, 0xDEAD, 16); 5203 movk(dst, 0xBEEF); 5204 } 5205 5206 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5207 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5208 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5209 int index = oop_recorder()->find_index(k); 5210 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5211 5212 InstructionMark im(this); 5213 RelocationHolder rspec = metadata_Relocation::spec(index); 5214 code_section()->relocate(inst_mark(), rspec); 5215 narrowKlass nk = CompressedKlassPointers::encode(k); 5216 movz(dst, (nk >> 16), 16); 5217 movk(dst, nk & 0xffff); 5218 } 5219 5220 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5221 Register dst, Address src, 5222 Register tmp1, Register tmp2) { 5223 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5224 decorators = AccessInternal::decorator_fixup(decorators, type); 5225 bool as_raw = (decorators & AS_RAW) != 0; 5226 if (as_raw) { 5227 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5228 } else { 5229 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5230 } 5231 } 5232 5233 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5234 Address dst, Register val, 5235 Register tmp1, Register tmp2, Register tmp3) { 5236 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5237 decorators = AccessInternal::decorator_fixup(decorators, type); 5238 bool as_raw = (decorators & AS_RAW) != 0; 5239 if (as_raw) { 5240 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5241 } else { 5242 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5243 } 5244 } 5245 5246 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5247 Register tmp2, DecoratorSet decorators) { 5248 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5249 } 5250 5251 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5252 Register tmp2, DecoratorSet decorators) { 5253 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5254 } 5255 5256 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5257 Register tmp2, Register 
tmp3, DecoratorSet decorators) { 5258 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5259 } 5260 5261 // Used for storing nulls. 5262 void MacroAssembler::store_heap_oop_null(Address dst) { 5263 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5264 } 5265 5266 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5267 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5268 int index = oop_recorder()->allocate_metadata_index(obj); 5269 RelocationHolder rspec = metadata_Relocation::spec(index); 5270 return Address((address)obj, rspec); 5271 } 5272 5273 // Move an oop into a register. 5274 void MacroAssembler::movoop(Register dst, jobject obj) { 5275 int oop_index; 5276 if (obj == nullptr) { 5277 oop_index = oop_recorder()->allocate_oop_index(obj); 5278 } else { 5279 #ifdef ASSERT 5280 { 5281 ThreadInVMfromUnknown tiv; 5282 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5283 } 5284 #endif 5285 oop_index = oop_recorder()->find_index(obj); 5286 } 5287 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5288 5289 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5290 mov(dst, Address((address)obj, rspec)); 5291 } else { 5292 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5293 ldr_constant(dst, Address(dummy, rspec)); 5294 } 5295 5296 } 5297 5298 // Move a metadata address into a register. 5299 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5300 int oop_index; 5301 if (obj == nullptr) { 5302 oop_index = oop_recorder()->allocate_metadata_index(obj); 5303 } else { 5304 oop_index = oop_recorder()->find_index(obj); 5305 } 5306 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5307 mov(dst, Address((address)obj, rspec)); 5308 } 5309 5310 Address MacroAssembler::constant_oop_address(jobject obj) { 5311 #ifdef ASSERT 5312 { 5313 ThreadInVMfromUnknown tiv; 5314 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5315 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5316 } 5317 #endif 5318 int oop_index = oop_recorder()->find_index(obj); 5319 return Address((address)obj, oop_Relocation::spec(oop_index)); 5320 } 5321 5322 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
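// A sketch of a typical call from compiler allocation code (the register
// names and constant size are illustrative only):
//   tlab_allocate(obj, noreg, instance_size_in_bytes, t1, t2, slow_case);
// On success it falls through with obj pointing at the newly allocated
// storage; otherwise it branches to slow_case.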
5323 void MacroAssembler::tlab_allocate(Register obj, 5324 Register var_size_in_bytes, 5325 int con_size_in_bytes, 5326 Register t1, 5327 Register t2, 5328 Label& slow_case) { 5329 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5330 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5331 } 5332 5333 void MacroAssembler::verify_tlab() { 5334 #ifdef ASSERT 5335 if (UseTLAB && VerifyOops) { 5336 Label next, ok; 5337 5338 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5339 5340 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5341 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5342 cmp(rscratch2, rscratch1); 5343 br(Assembler::HS, next); 5344 STOP("assert(top >= start)"); 5345 should_not_reach_here(); 5346 5347 bind(next); 5348 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5349 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5350 cmp(rscratch2, rscratch1); 5351 br(Assembler::HS, ok); 5352 STOP("assert(top <= end)"); 5353 should_not_reach_here(); 5354 5355 bind(ok); 5356 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5357 } 5358 #endif 5359 } 5360 5361 // Writes to successive stack pages until the offset is reached, to check 5362 // for stack overflow + shadow pages. This clobbers tmp. 5363 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5364 assert_different_registers(tmp, size, rscratch1); 5365 mov(tmp, sp); 5366 // Bang stack for total size given plus shadow page size. 5367 // Bang one page at a time because large size can bang beyond yellow and 5368 // red zones. 5369 Label loop; 5370 mov(rscratch1, (int)os::vm_page_size()); 5371 bind(loop); 5372 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5373 subsw(size, size, rscratch1); 5374 str(size, Address(tmp)); 5375 br(Assembler::GT, loop); 5376 5377 // Bang down shadow pages too. 5378 // At this point, (tmp-0) is the last address touched, so don't 5379 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5380 // was post-decremented.) Skip this address by starting at i=1, and 5381 // touch a few more pages below. N.B. It is important to touch all 5382 // the way down to and including i=StackShadowPages. 5383 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 5384 // This could be any sized move, but since it can serve as a debugging 5385 // crumb, the bigger the better. 5386 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5387 str(size, Address(tmp)); 5388 } 5389 } 5390 5391 // Move the address of the polling page into dest. 5392 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5393 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5394 } 5395 5396 // Read the polling page. The address of the polling page must 5397 // already be in r.
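// A sketch of the expected pairing (illustrative):
//   get_polling_page(rscratch1, relocInfo::poll_type);
//   read_polling_page(rscratch1, relocInfo::poll_type);
// The relocation recorded below lets the signal handler recognise the
// faulting ldrw as a safepoint poll.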
5398 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5399 address mark; 5400 { 5401 InstructionMark im(this); 5402 code_section()->relocate(inst_mark(), rtype); 5403 ldrw(zr, Address(r, 0)); 5404 mark = inst_mark(); 5405 } 5406 verify_cross_modify_fence_not_required(); 5407 return mark; 5408 } 5409 5410 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5411 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5412 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5413 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5414 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5415 int64_t offset_low = dest_page - low_page; 5416 int64_t offset_high = dest_page - high_page; 5417 5418 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5419 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5420 5421 InstructionMark im(this); 5422 code_section()->relocate(inst_mark(), dest.rspec()); 5423 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5424 // the code cache so that if it is relocated we know it will still reach 5425 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5426 _adrp(reg1, dest.target()); 5427 } else { 5428 uint64_t target = (uint64_t)dest.target(); 5429 uint64_t adrp_target 5430 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5431 5432 _adrp(reg1, (address)adrp_target); 5433 movk(reg1, target >> 32, 32); 5434 } 5435 byte_offset = (uint64_t)dest.target() & 0xfff; 5436 } 5437 5438 void MacroAssembler::load_byte_map_base(Register reg) { 5439 CardTable::CardValue* byte_map_base = 5440 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5441 5442 // Strictly speaking the byte_map_base isn't an address at all, and it might 5443 // even be negative. It is thus materialised as a constant. 
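// (Illustrative use: a card-table post-barrier marks the card for an
//  address a roughly as
//    lsr(tmp, a, CardTable::card_shift());
//    strb(zr, Address(tmp, reg));  // reg holds the value loaded here
//  so only the sum byte_map_base + (a >> card_shift) needs to be a valid
//  address, not byte_map_base itself.)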
5444 if (SCCache::is_on_for_write()) { 5445 // SCA needs relocation info for card table base 5446 lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base))); 5447 } else { 5448 mov(reg, (uint64_t)byte_map_base); 5449 } 5450 } 5451 5452 void MacroAssembler::build_frame(int framesize) { 5453 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5454 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5455 protect_return_address(); 5456 if (framesize < ((1 << 9) + 2 * wordSize)) { 5457 sub(sp, sp, framesize); 5458 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5459 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 5460 } else { 5461 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 5462 if (PreserveFramePointer) mov(rfp, sp); 5463 if (framesize < ((1 << 12) + 2 * wordSize)) 5464 sub(sp, sp, framesize - 2 * wordSize); 5465 else { 5466 mov(rscratch1, framesize - 2 * wordSize); 5467 sub(sp, sp, rscratch1); 5468 } 5469 } 5470 verify_cross_modify_fence_not_required(); 5471 } 5472 5473 void MacroAssembler::remove_frame(int framesize) { 5474 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5475 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5476 if (framesize < ((1 << 9) + 2 * wordSize)) { 5477 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5478 add(sp, sp, framesize); 5479 } else { 5480 if (framesize < ((1 << 12) + 2 * wordSize)) 5481 add(sp, sp, framesize - 2 * wordSize); 5482 else { 5483 mov(rscratch1, framesize - 2 * wordSize); 5484 add(sp, sp, rscratch1); 5485 } 5486 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 5487 } 5488 authenticate_return_address(); 5489 } 5490 5491 5492 // This method counts leading positive bytes (highest bit not set) in the provided byte array 5493 address MacroAssembler::count_positives(Register ary1, Register len, Register result) { 5494 // The simple and most common case (a small aligned array that is not at 5495 // the end of a memory page) is handled here; all other cases are in stubs. 5496 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE; 5497 const uint64_t UPPER_BIT_MASK=0x8080808080808080; 5498 assert_different_registers(ary1, len, result); 5499 5500 mov(result, len); 5501 cmpw(len, 0); 5502 br(LE, DONE); 5503 cmpw(len, 4 * wordSize); 5504 br(GE, STUB_LONG); // size >= 32: go to the long stub 5505 5506 int shift = 64 - exact_log2(os::vm_page_size()); 5507 lsl(rscratch1, ary1, shift); 5508 mov(rscratch2, (size_t)(4 * wordSize) << shift); 5509 adds(rscratch2, rscratch1, rscratch2); // At end of page?
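  // (The lsl above moved ary1's offset within its page into the top bits,
  //  so the adds sets the carry flag exactly when offset_in_page + 32
  //  reaches the page size, i.e. when a 32-byte read could cross into the
  //  next, possibly unmapped, page.)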
5510 br(CS, STUB); // near the end of the page: go to the stub 5511 subs(len, len, wordSize); 5512 br(LT, END); 5513 5514 BIND(LOOP); 5515 ldr(rscratch1, Address(post(ary1, wordSize))); 5516 tst(rscratch1, UPPER_BIT_MASK); 5517 br(NE, SET_RESULT); 5518 subs(len, len, wordSize); 5519 br(GE, LOOP); 5520 cmpw(len, -wordSize); 5521 br(EQ, DONE); 5522 5523 BIND(END); 5524 ldr(rscratch1, Address(ary1)); 5525 sub(rscratch2, zr, len, LSL, 3); // LSL 3 converts the byte count to bits 5526 lslv(rscratch1, rscratch1, rscratch2); 5527 tst(rscratch1, UPPER_BIT_MASK); 5528 br(NE, SET_RESULT); 5529 b(DONE); 5530 5531 BIND(STUB); 5532 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5533 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5534 address tpc1 = trampoline_call(count_pos); 5535 if (tpc1 == nullptr) { 5536 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5537 postcond(pc() == badAddress); 5538 return nullptr; 5539 } 5540 b(DONE); 5541 5542 BIND(STUB_LONG); 5543 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5544 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5545 address tpc2 = trampoline_call(count_pos_long); 5546 if (tpc2 == nullptr) { 5547 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5548 postcond(pc() == badAddress); 5549 return nullptr; 5550 } 5551 b(DONE); 5552 5553 BIND(SET_RESULT); 5554 5555 add(len, len, wordSize); 5556 sub(result, result, len); 5557 5558 BIND(DONE); 5559 postcond(pc() != badAddress); 5560 return pc(); 5561 } 5562 5563 // Clobbers: rscratch1, rscratch2, rflags 5564 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5565 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5566 Register tmp4, Register tmp5, Register result, 5567 Register cnt1, int elem_size) { 5568 Label DONE, SAME; 5569 Register tmp1 = rscratch1; 5570 Register tmp2 = rscratch2; 5571 int elem_per_word = wordSize/elem_size; 5572 int log_elem_size = exact_log2(elem_size); 5573 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5574 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5575 int base_offset 5576 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5577 // When the length offset is not aligned to 8 bytes, 5578 // we align it down. This is valid because the aligned 5579 // offset then lands on the klass field, which is the same 5580 // for type arrays of the same element type. 5581 int start_offset = align_down(length_offset, BytesPerWord); 5582 int extra_length = base_offset - start_offset; 5583 assert(start_offset == length_offset || start_offset == klass_offset, 5584 "start offset must be 8-byte-aligned or be the klass offset"); 5585 assert(base_offset != start_offset, "must include the length field"); 5586 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5587 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5588 5589 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5590 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5591 5592 #ifndef PRODUCT 5593 { 5594 const char kind = (elem_size == 2) ? 'U' : 'L'; 5595 char comment[64]; 5596 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5597 BLOCK_COMMENT(comment); 5598 } 5599 #endif 5600 5601 // if (a1 == a2) 5602 // return true; 5603 cmpoop(a1, a2); // May have read barriers for a1 and a2.
5604 br(EQ, SAME); 5605 5606 if (UseSimpleArrayEquals) { 5607 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5608 // if (a1 == nullptr || a2 == nullptr) 5609 // return false; 5610 // a1 & a2 == 0 means at least one pointer is null or (very rarely) 5611 // the two pointer values share no set bits, 5612 // so a single tst saves a branch in the common case 5613 tst(a1, a2); 5614 mov(result, false); 5615 br(EQ, A_MIGHT_BE_NULL); 5616 // if (a1.length != a2.length) 5617 // return false; 5618 bind(A_IS_NOT_NULL); 5619 ldrw(cnt1, Address(a1, length_offset)); 5620 // Increase loop counter by diff between base- and actual start-offset. 5621 addw(cnt1, cnt1, extra_length); 5622 lea(a1, Address(a1, start_offset)); 5623 lea(a2, Address(a2, start_offset)); 5624 // Check for short arrays, i.e. smaller than wordSize. 5625 subs(cnt1, cnt1, elem_per_word); 5626 br(Assembler::LT, SHORT); 5627 // Main 8 byte comparison loop. 5628 bind(NEXT_WORD); { 5629 ldr(tmp1, Address(post(a1, wordSize))); 5630 ldr(tmp2, Address(post(a2, wordSize))); 5631 subs(cnt1, cnt1, elem_per_word); 5632 eor(tmp5, tmp1, tmp2); 5633 cbnz(tmp5, DONE); 5634 } br(GT, NEXT_WORD); 5635 // Last longword. In the case where length == 4 we compare the 5636 // same longword twice, but that's still faster than another 5637 // conditional branch. 5638 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5639 // length == 4. 5640 if (log_elem_size > 0) 5641 lsl(cnt1, cnt1, log_elem_size); 5642 ldr(tmp3, Address(a1, cnt1)); 5643 ldr(tmp4, Address(a2, cnt1)); 5644 eor(tmp5, tmp3, tmp4); 5645 cbnz(tmp5, DONE); 5646 b(SAME); 5647 bind(A_MIGHT_BE_NULL); 5648 // if both a1 and a2 are non-null, go back and compare the contents 5649 cbz(a1, DONE); 5650 cbz(a2, DONE); 5651 b(A_IS_NOT_NULL); 5652 bind(SHORT); 5653 5654 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5655 { 5656 ldrw(tmp1, Address(post(a1, 4))); 5657 ldrw(tmp2, Address(post(a2, 4))); 5658 eorw(tmp5, tmp1, tmp2); 5659 cbnzw(tmp5, DONE); 5660 } 5661 bind(TAIL03); 5662 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5663 { 5664 ldrh(tmp3, Address(post(a1, 2))); 5665 ldrh(tmp4, Address(post(a2, 2))); 5666 eorw(tmp5, tmp3, tmp4); 5667 cbnzw(tmp5, DONE); 5668 } 5669 bind(TAIL01); 5670 if (elem_size == 1) { // Only needed when comparing byte arrays. 5671 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5672 { 5673 ldrb(tmp1, a1); 5674 ldrb(tmp2, a2); 5675 eorw(tmp5, tmp1, tmp2); 5676 cbnzw(tmp5, DONE); 5677 } 5678 } 5679 } else { 5680 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5681 CSET_EQ, LAST_CHECK; 5682 mov(result, false); 5683 cbz(a1, DONE); 5684 ldrw(cnt1, Address(a1, length_offset)); 5685 cbz(a2, DONE); 5686 // Increase loop counter by diff between base- and actual start-offset.
5687 addw(cnt1, cnt1, extra_length); 5688 5689 // On most CPUs a2 is, surprisingly, still "locked" by the ldrw above, so 5690 // it is faster to take another branch before comparing a1 and a2 5691 cmp(cnt1, (u1)elem_per_word); 5692 br(LE, SHORT); // short or same 5693 ldr(tmp3, Address(pre(a1, start_offset))); 5694 subs(zr, cnt1, stubBytesThreshold); 5695 br(GE, STUB); 5696 ldr(tmp4, Address(pre(a2, start_offset))); 5697 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5698 5699 // Main 16 byte comparison loop with 2 exits 5700 bind(NEXT_DWORD); { 5701 ldr(tmp1, Address(pre(a1, wordSize))); 5702 ldr(tmp2, Address(pre(a2, wordSize))); 5703 subs(cnt1, cnt1, 2 * elem_per_word); 5704 br(LE, TAIL); 5705 eor(tmp4, tmp3, tmp4); 5706 cbnz(tmp4, DONE); 5707 ldr(tmp3, Address(pre(a1, wordSize))); 5708 ldr(tmp4, Address(pre(a2, wordSize))); 5709 cmp(cnt1, (u1)elem_per_word); 5710 br(LE, TAIL2); 5711 cmp(tmp1, tmp2); 5712 } br(EQ, NEXT_DWORD); 5713 b(DONE); 5714 5715 bind(TAIL); 5716 eor(tmp4, tmp3, tmp4); 5717 eor(tmp2, tmp1, tmp2); 5718 lslv(tmp2, tmp2, tmp5); 5719 orr(tmp5, tmp4, tmp2); 5720 cmp(tmp5, zr); 5721 b(CSET_EQ); 5722 5723 bind(TAIL2); 5724 eor(tmp2, tmp1, tmp2); 5725 cbnz(tmp2, DONE); 5726 b(LAST_CHECK); 5727 5728 bind(STUB); 5729 ldr(tmp4, Address(pre(a2, start_offset))); 5730 if (elem_size == 2) { // convert to byte counter 5731 lsl(cnt1, cnt1, 1); 5732 } 5733 eor(tmp5, tmp3, tmp4); 5734 cbnz(tmp5, DONE); 5735 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 5736 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 5737 address tpc = trampoline_call(stub); 5738 if (tpc == nullptr) { 5739 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 5740 postcond(pc() == badAddress); 5741 return nullptr; 5742 } 5743 b(DONE); 5744 5745 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 5746 // so, if a2 == null => return false(0), else return true, so we can return a2 5747 mov(result, a2); 5748 b(DONE); 5749 bind(SHORT); 5750 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5751 ldr(tmp3, Address(a1, start_offset)); 5752 ldr(tmp4, Address(a2, start_offset)); 5753 bind(LAST_CHECK); 5754 eor(tmp4, tmp3, tmp4); 5755 lslv(tmp5, tmp4, tmp5); 5756 cmp(tmp5, zr); 5757 bind(CSET_EQ); 5758 cset(result, EQ); 5759 b(DONE); 5760 } 5761 5762 bind(SAME); 5763 mov(result, true); 5764 // That's it. 5765 bind(DONE); 5766 5767 BLOCK_COMMENT("} array_equals"); 5768 postcond(pc() != badAddress); 5769 return pc(); 5770 } 5771 5772 // Compare Strings 5773 5774 // For Strings we're passed the address of the first characters in a1 5775 // and a2 and the length in cnt1. 5776 // There are two implementations. For strings >= 8 bytes, all 5777 // comparisons (including the final one, which may overlap) are 5778 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 5779 // word, then a halfword, and then a byte. 5780 5781 void MacroAssembler::string_equals(Register a1, Register a2, 5782 Register result, Register cnt1) 5783 { 5784 Label SAME, DONE, SHORT, NEXT_WORD; 5785 Register tmp1 = rscratch1; 5786 Register tmp2 = rscratch2; 5787 Register cnt2 = tmp2; // cnt2 only used in array length compare 5788 5789 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5790 5791 #ifndef PRODUCT 5792 { 5793 char comment[64]; 5794 snprintf(comment, sizeof comment, "{string_equalsL"); 5795 BLOCK_COMMENT(comment); 5796 } 5797 #endif 5798 5799 mov(result, false); 5800 5801 // Check for short strings, i.e. smaller than wordSize.
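  // (cnt1 arrives holding the byte count; after the subtraction below it
  //  is negative for short strings and later doubles as the negative
  //  offset used by the final overlapped 8-byte compare.)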
5802 subs(cnt1, cnt1, wordSize); 5803 br(Assembler::LT, SHORT); 5804 // Main 8 byte comparison loop. 5805 bind(NEXT_WORD); { 5806 ldr(tmp1, Address(post(a1, wordSize))); 5807 ldr(tmp2, Address(post(a2, wordSize))); 5808 subs(cnt1, cnt1, wordSize); 5809 eor(tmp1, tmp1, tmp2); 5810 cbnz(tmp1, DONE); 5811 } br(GT, NEXT_WORD); 5812 // Last longword. In the case where length == 4 we compare the 5813 // same longword twice, but that's still faster than another 5814 // conditional branch. 5815 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5816 // length == 4. 5817 ldr(tmp1, Address(a1, cnt1)); 5818 ldr(tmp2, Address(a2, cnt1)); 5819 eor(tmp2, tmp1, tmp2); 5820 cbnz(tmp2, DONE); 5821 b(SAME); 5822 5823 bind(SHORT); 5824 Label TAIL03, TAIL01; 5825 5826 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5827 { 5828 ldrw(tmp1, Address(post(a1, 4))); 5829 ldrw(tmp2, Address(post(a2, 4))); 5830 eorw(tmp1, tmp1, tmp2); 5831 cbnzw(tmp1, DONE); 5832 } 5833 bind(TAIL03); 5834 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5835 { 5836 ldrh(tmp1, Address(post(a1, 2))); 5837 ldrh(tmp2, Address(post(a2, 2))); 5838 eorw(tmp1, tmp1, tmp2); 5839 cbnzw(tmp1, DONE); 5840 } 5841 bind(TAIL01); 5842 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5843 { 5844 ldrb(tmp1, a1); 5845 ldrb(tmp2, a2); 5846 eorw(tmp1, tmp1, tmp2); 5847 cbnzw(tmp1, DONE); 5848 } 5849 // Arrays are equal. 5850 bind(SAME); 5851 mov(result, true); 5852 5853 // That's it. 5854 bind(DONE); 5855 BLOCK_COMMENT("} string_equals"); 5856 } 5857 5858 5859 // The size of the blocks erased by the zero_blocks stub. We must 5860 // handle anything smaller than this ourselves in zero_words(). 5861 const int MacroAssembler::zero_words_block_size = 8; 5862 5863 // zero_words() is used by C2 ClearArray patterns and by 5864 // C1_MacroAssembler. It is as small as possible, handling small word 5865 // counts locally and delegating anything larger to the zero_blocks 5866 // stub. It is expanded many times in compiled code, so it is 5867 // important to keep it short. 5868 5869 // ptr: Address of a buffer to be zeroed. 5870 // cnt: Count in HeapWords. 5871 // 5872 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5873 address MacroAssembler::zero_words(Register ptr, Register cnt) 5874 { 5875 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5876 5877 BLOCK_COMMENT("zero_words {"); 5878 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5879 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5880 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5881 5882 subs(rscratch1, cnt, zero_words_block_size); 5883 Label around; 5884 br(LO, around); 5885 { 5886 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5887 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5888 // Make sure this is a C2 compilation. C1 allocates space only for 5889 // trampoline stubs generated by Call LIR ops, and in any case it 5890 // makes sense for a C1 compilation task to proceed as quickly as 5891 // possible. 
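    // (Illustrative note: trampoline_call may return nullptr when no
    //  trampoline stub can be allocated, which the C2 path below treats as
    //  a bailout; far_call can always reach the stub without a trampoline,
    //  so the non-C2 path needs no such check.)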
5892 CompileTask* task; 5893 if (StubRoutines::aarch64::complete() 5894 && Thread::current()->is_Compiler_thread() 5895 && (task = ciEnv::current()->task()) 5896 && is_c2_compile(task->comp_level())) { 5897 address tpc = trampoline_call(zero_blocks); 5898 if (tpc == nullptr) { 5899 DEBUG_ONLY(reset_labels(around)); 5900 return nullptr; 5901 } 5902 } else { 5903 far_call(zero_blocks); 5904 } 5905 } 5906 bind(around); 5907 5908 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5909 // for us. 5910 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5911 Label l; 5912 tbz(cnt, exact_log2(i), l); 5913 for (int j = 0; j < i; j += 2) { 5914 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5915 } 5916 bind(l); 5917 } 5918 { 5919 Label l; 5920 tbz(cnt, 0, l); 5921 str(zr, Address(ptr)); 5922 bind(l); 5923 } 5924 5925 BLOCK_COMMENT("} zero_words"); 5926 return pc(); 5927 } 5928 5929 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5930 // cnt: Immediate count in HeapWords. 5931 // 5932 // r10, r11, rscratch1, and rscratch2 are clobbered. 5933 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5934 { 5935 assert(wordSize <= BlockZeroingLowLimit, 5936 "increase BlockZeroingLowLimit"); 5937 address result = nullptr; 5938 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5939 #ifndef PRODUCT 5940 { 5941 char buf[64]; 5942 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5943 BLOCK_COMMENT(buf); 5944 } 5945 #endif 5946 if (cnt >= 16) { 5947 uint64_t loops = cnt/16; 5948 if (loops > 1) { 5949 mov(rscratch2, loops - 1); 5950 } 5951 { 5952 Label loop; 5953 bind(loop); 5954 for (int i = 0; i < 16; i += 2) { 5955 stp(zr, zr, Address(base, i * BytesPerWord)); 5956 } 5957 add(base, base, 16 * BytesPerWord); 5958 if (loops > 1) { 5959 subs(rscratch2, rscratch2, 1); 5960 br(GE, loop); 5961 } 5962 } 5963 } 5964 cnt %= 16; 5965 int i = cnt & 1; // store any odd word to start 5966 if (i) str(zr, Address(base)); 5967 for (; i < (int)cnt; i += 2) { 5968 stp(zr, zr, Address(base, i * wordSize)); 5969 } 5970 BLOCK_COMMENT("} zero_words"); 5971 result = pc(); 5972 } else { 5973 mov(r10, base); mov(r11, cnt); 5974 result = zero_words(r10, r11); 5975 } 5976 return result; 5977 } 5978 5979 // Zero blocks of memory by using DC ZVA. 5980 // 5981 // Aligns the base address first sufficiently for DC ZVA, then uses 5982 // DC ZVA repeatedly for every full block. cnt is the size to be 5983 // zeroed in HeapWords. Returns the count of words left to be zeroed 5984 // in cnt. 5985 // 5986 // NOTE: This is intended to be used in the zero_blocks() stub. If 5987 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 5988 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 5989 Register tmp = rscratch1; 5990 Register tmp2 = rscratch2; 5991 int zva_length = VM_Version::zva_length(); 5992 Label initial_table_end, loop_zva; 5993 Label fini; 5994 5995 // Base must be 16 byte aligned. If not just return and let caller handle it 5996 tst(base, 0x0f); 5997 br(Assembler::NE, fini); 5998 // Align base with ZVA length. 5999 neg(tmp, base); 6000 andr(tmp, tmp, zva_length - 1); 6001 6002 // tmp: the number of bytes to be filled to align the base with ZVA length. 
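  // (A sketch of the computed branch below, with illustrative numbers:
  //  each stp in the table stores 16 bytes and occupies 4 bytes of code,
  //  so with zva_length == 64 and tmp == 32 the br enters the table two
  //  stps before initial_table_end and stores exactly the 32 alignment
  //  bytes.)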
6003 add(base, base, tmp); 6004 sub(cnt, cnt, tmp, Assembler::ASR, 3); 6005 adr(tmp2, initial_table_end); 6006 sub(tmp2, tmp2, tmp, Assembler::LSR, 2); 6007 br(tmp2); 6008 6009 for (int i = -zva_length + 16; i < 0; i += 16) 6010 stp(zr, zr, Address(base, i)); 6011 bind(initial_table_end); 6012 6013 sub(cnt, cnt, zva_length >> 3); 6014 bind(loop_zva); 6015 dc(Assembler::ZVA, base); 6016 subs(cnt, cnt, zva_length >> 3); 6017 add(base, base, zva_length); 6018 br(Assembler::GE, loop_zva); 6019 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA 6020 bind(fini); 6021 } 6022 6023 // base: Address of a buffer to be filled, 8 bytes aligned. 6024 // cnt: Count in 8-byte units. 6025 // value: Value to be filled with. 6026 // base will point to the end of the buffer after filling. 6027 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 6028 { 6029 // Algorithm: 6030 // 6031 // if (cnt == 0) { 6032 // return; 6033 // } 6034 // if ((p & 8) != 0) { 6035 // *p++ = v; 6036 // } 6037 // 6038 // scratch1 = cnt & 14; 6039 // cnt -= scratch1; 6040 // p += scratch1; 6041 // switch (scratch1 / 2) { 6042 // do { 6043 // cnt -= 16; 6044 // p[-16] = v; 6045 // p[-15] = v; 6046 // case 7: 6047 // p[-14] = v; 6048 // p[-13] = v; 6049 // case 6: 6050 // p[-12] = v; 6051 // p[-11] = v; 6052 // // ... 6053 // case 1: 6054 // p[-2] = v; 6055 // p[-1] = v; 6056 // case 0: 6057 // p += 16; 6058 // } while (cnt); 6059 // } 6060 // if ((cnt & 1) == 1) { 6061 // *p++ = v; 6062 // } 6063 6064 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 6065 6066 Label fini, skip, entry, loop; 6067 const int unroll = 8; // Number of stp instructions we'll unroll 6068 6069 cbz(cnt, fini); 6070 tbz(base, 3, skip); 6071 str(value, Address(post(base, 8))); 6072 sub(cnt, cnt, 1); 6073 bind(skip); 6074 6075 andr(rscratch1, cnt, (unroll-1) * 2); 6076 sub(cnt, cnt, rscratch1); 6077 add(base, base, rscratch1, Assembler::LSL, 3); 6078 adr(rscratch2, entry); 6079 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 6080 br(rscratch2); 6081 6082 bind(loop); 6083 add(base, base, unroll * 16); 6084 for (int i = -unroll; i < 0; i++) 6085 stp(value, value, Address(base, i * 16)); 6086 bind(entry); 6087 subs(cnt, cnt, unroll * 2); 6088 br(Assembler::GE, loop); 6089 6090 tbz(cnt, 0, fini); 6091 str(value, Address(post(base, 8))); 6092 bind(fini); 6093 } 6094 6095 // Intrinsic for 6096 // 6097 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray 6098 // returns the number of characters copied. 6099 // - java/lang/StringUTF16.compress 6100 // returns the index of the first non-latin1 character if the copy fails, otherwise 'len'. 6101 // 6102 // This version always returns the number of characters copied, and does not 6103 // clobber the 'len' register. A successful copy will complete with the post- 6104 // condition: 'res' == 'len', while an unsuccessful copy will exit with the 6105 // post-condition: 0 <= 'res' < 'len'. 6106 // 6107 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to 6108 // degrade performance (on Ampere Altra - Neoverse N1), to an extent 6109 // beyond the acceptable, even though the footprint would be smaller. 6110 // Using 'umaxv' in the ASCII-case comes with a small penalty but does 6111 // avoid additional bloat.
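//
// A sketch of the resulting contract (illustrative):
//   encode_iso_array(src, dst, len, res, /*ascii=*/false, v0, v1, v2, v3, v4, v5);
//   // success: res == len; failure: res is the index of the first
//   // char > 0xff (with ascii == true the threshold is 0x7f instead)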
6112 // 6113 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6114 void MacroAssembler::encode_iso_array(Register src, Register dst, 6115 Register len, Register res, bool ascii, 6116 FloatRegister vtmp0, FloatRegister vtmp1, 6117 FloatRegister vtmp2, FloatRegister vtmp3, 6118 FloatRegister vtmp4, FloatRegister vtmp5) 6119 { 6120 Register cnt = res; 6121 Register max = rscratch1; 6122 Register chk = rscratch2; 6123 6124 prfm(Address(src), PLDL1STRM); 6125 movw(cnt, len); 6126 6127 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6128 6129 Label LOOP_32, DONE_32, FAIL_32; 6130 6131 BIND(LOOP_32); 6132 { 6133 cmpw(cnt, 32); 6134 br(LT, DONE_32); 6135 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6136 // Extract lower bytes. 6137 FloatRegister vlo0 = vtmp4; 6138 FloatRegister vlo1 = vtmp5; 6139 uzp1(vlo0, T16B, vtmp0, vtmp1); 6140 uzp1(vlo1, T16B, vtmp2, vtmp3); 6141 // Merge bits... 6142 orr(vtmp0, T16B, vtmp0, vtmp1); 6143 orr(vtmp2, T16B, vtmp2, vtmp3); 6144 // Extract merged upper bytes. 6145 FloatRegister vhix = vtmp0; 6146 uzp2(vhix, T16B, vtmp0, vtmp2); 6147 // ISO-check on hi-parts (all zero). 6148 // ASCII-check on lo-parts (no sign). 6149 FloatRegister vlox = vtmp1; // Merge lower bytes. 6150 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6151 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6152 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6153 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6154 ASCII(orr(chk, chk, max)); 6155 cbnz(chk, FAIL_32); 6156 subw(cnt, cnt, 32); 6157 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6158 b(LOOP_32); 6159 } 6160 BIND(FAIL_32); 6161 sub(src, src, 64); 6162 BIND(DONE_32); 6163 6164 Label LOOP_8, SKIP_8; 6165 6166 BIND(LOOP_8); 6167 { 6168 cmpw(cnt, 8); 6169 br(LT, SKIP_8); 6170 FloatRegister vhi = vtmp0; 6171 FloatRegister vlo = vtmp1; 6172 ld1(vtmp3, T8H, src); 6173 uzp1(vlo, T16B, vtmp3, vtmp3); 6174 uzp2(vhi, T16B, vtmp3, vtmp3); 6175 // ISO-check on hi-parts (all zero). 6176 // ASCII-check on lo-parts (no sign). 6177 ASCII(cm(LT, vtmp2, T16B, vlo)); 6178 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6179 ASCII(umov(max, vtmp2, B, 0)); 6180 ASCII(orr(chk, chk, max)); 6181 cbnz(chk, SKIP_8); 6182 6183 strd(vlo, Address(post(dst, 8))); 6184 subw(cnt, cnt, 8); 6185 add(src, src, 16); 6186 b(LOOP_8); 6187 } 6188 BIND(SKIP_8); 6189 6190 #undef ASCII 6191 6192 Label LOOP, DONE; 6193 6194 cbz(cnt, DONE); 6195 BIND(LOOP); 6196 { 6197 Register chr = rscratch1; 6198 ldrh(chr, Address(post(src, 2))); 6199 tst(chr, ascii ? 0xff80 : 0xff00); 6200 br(NE, DONE); 6201 strb(chr, Address(post(dst, 1))); 6202 subs(cnt, cnt, 1); 6203 br(GT, LOOP); 6204 } 6205 BIND(DONE); 6206 // Return index where we stopped. 6207 subw(res, len, cnt); 6208 } 6209 6210 // Inflate byte[] array to char[]. 6211 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6212 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6213 FloatRegister vtmp1, FloatRegister vtmp2, 6214 FloatRegister vtmp3, Register tmp4) { 6215 Label big, done, after_init, to_stub; 6216 6217 assert_different_registers(src, dst, len, tmp4, rscratch1); 6218 6219 fmovd(vtmp1, 0.0); 6220 lsrw(tmp4, len, 3); 6221 bind(after_init); 6222 cbnzw(tmp4, big); 6223 // Short string: less than 8 bytes. 6224 { 6225 Label loop, tiny; 6226 6227 cmpw(len, 4); 6228 br(LT, tiny); 6229 // Use SIMD to do 4 bytes. 
6230 ldrs(vtmp2, post(src, 4)); 6231 zip1(vtmp3, T8B, vtmp2, vtmp1); 6232 subw(len, len, 4); 6233 strd(vtmp3, post(dst, 8)); 6234 6235 cbzw(len, done); 6236 6237 // Do the remaining bytes one at a time. 6238 bind(loop); 6239 ldrb(tmp4, post(src, 1)); 6240 strh(tmp4, post(dst, 2)); 6241 subw(len, len, 1); 6242 6243 bind(tiny); 6244 cbnz(len, loop); 6245 6246 b(done); 6247 } 6248 6249 if (SoftwarePrefetchHintDistance >= 0) { 6250 bind(to_stub); 6251 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6252 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6253 address tpc = trampoline_call(stub); 6254 if (tpc == nullptr) { 6255 DEBUG_ONLY(reset_labels(big, done)); 6256 postcond(pc() == badAddress); 6257 return nullptr; 6258 } 6259 b(after_init); 6260 } 6261 6262 // Unpack the bytes 8 at a time. 6263 bind(big); 6264 { 6265 Label loop, around, loop_last, loop_start; 6266 6267 if (SoftwarePrefetchHintDistance >= 0) { 6268 const int large_loop_threshold = (64 + 16)/8; 6269 ldrd(vtmp2, post(src, 8)); 6270 andw(len, len, 7); 6271 cmp(tmp4, (u1)large_loop_threshold); 6272 br(GE, to_stub); 6273 b(loop_start); 6274 6275 bind(loop); 6276 ldrd(vtmp2, post(src, 8)); 6277 bind(loop_start); 6278 subs(tmp4, tmp4, 1); 6279 br(EQ, loop_last); 6280 zip1(vtmp2, T16B, vtmp2, vtmp1); 6281 ldrd(vtmp3, post(src, 8)); 6282 st1(vtmp2, T8H, post(dst, 16)); 6283 subs(tmp4, tmp4, 1); 6284 zip1(vtmp3, T16B, vtmp3, vtmp1); 6285 st1(vtmp3, T8H, post(dst, 16)); 6286 br(NE, loop); 6287 b(around); 6288 bind(loop_last); 6289 zip1(vtmp2, T16B, vtmp2, vtmp1); 6290 st1(vtmp2, T8H, post(dst, 16)); 6291 bind(around); 6292 cbz(len, done); 6293 } else { 6294 andw(len, len, 7); 6295 bind(loop); 6296 ldrd(vtmp2, post(src, 8)); 6297 sub(tmp4, tmp4, 1); 6298 zip1(vtmp3, T16B, vtmp2, vtmp1); 6299 st1(vtmp3, T8H, post(dst, 16)); 6300 cbnz(tmp4, loop); 6301 } 6302 } 6303 6304 // Do the tail of up to 8 bytes. 6305 add(src, src, len); 6306 ldrd(vtmp3, Address(src, -8)); 6307 add(dst, dst, len, ext::uxtw, 1); 6308 zip1(vtmp3, T16B, vtmp3, vtmp1); 6309 strq(vtmp3, Address(dst, -16)); 6310 6311 bind(done); 6312 postcond(pc() != badAddress); 6313 return pc(); 6314 } 6315 6316 // Compress char[] array to byte[]. 6317 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6318 // Returns the array length if every element in the array can be encoded; 6319 // otherwise, the index of the first non-latin1 (> 0xff) character. 6320 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6321 Register res, 6322 FloatRegister tmp0, FloatRegister tmp1, 6323 FloatRegister tmp2, FloatRegister tmp3, 6324 FloatRegister tmp4, FloatRegister tmp5) { 6325 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6326 } 6327 6328 // java.lang.Math.round(double a) 6329 // Returns the closest long to the argument, with ties rounding to 6330 // positive infinity. This requires some fiddling for corner 6331 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6332 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6333 FloatRegister ftmp) { 6334 Label DONE; 6335 BLOCK_COMMENT("java_round_double: { "); 6336 fmovd(rscratch1, src); 6337 // Use RoundToNearestTiesAway unless src is small and negative.
6338 fcvtasd(dst, src); 6339 // Test if src >= 0 || abs(src) >= 0x1.0p52 6340 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6341 mov(rscratch2, julong_cast(0x1.0p52)); 6342 cmp(rscratch1, rscratch2); 6343 br(HS, DONE); { 6344 // src < 0 && abs(src) < 0x1.0p52 6345 // src may have a fractional part, so add 0.5 6346 fmovd(ftmp, 0.5); 6347 faddd(ftmp, src, ftmp); 6348 // Convert double to jlong, use RoundTowardsNegative 6349 fcvtmsd(dst, ftmp); 6350 } 6351 bind(DONE); 6352 BLOCK_COMMENT("} java_round_double"); 6353 } 6354 6355 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6356 FloatRegister ftmp) { 6357 Label DONE; 6358 BLOCK_COMMENT("java_round_float: { "); 6359 fmovs(rscratch1, src); 6360 // Use RoundToNearestTiesAway unless src small and -ve. 6361 fcvtassw(dst, src); 6362 // Test if src >= 0 || abs(src) >= 0x1.0p23 6363 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6364 mov(rscratch2, jint_cast(0x1.0p23f)); 6365 cmp(rscratch1, rscratch2); 6366 br(HS, DONE); { 6367 // src < 0 && |src| < 0x1.0p23 6368 // src may have a fractional part, so add 0.5 6369 fmovs(ftmp, 0.5f); 6370 fadds(ftmp, src, ftmp); 6371 // Convert float to jint, use RoundTowardsNegative 6372 fcvtmssw(dst, ftmp); 6373 } 6374 bind(DONE); 6375 BLOCK_COMMENT("} java_round_float"); 6376 } 6377 6378 // get_thread() can be called anywhere inside generated code so we 6379 // need to save whatever non-callee save context might get clobbered 6380 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6381 // the call setup code. 6382 // 6383 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6384 // On other systems, the helper is a usual C function. 6385 // 6386 void MacroAssembler::get_thread(Register dst) { 6387 RegSet saved_regs = 6388 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6389 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6390 6391 protect_return_address(); 6392 push(saved_regs, sp); 6393 6394 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6395 blr(lr); 6396 if (dst != c_rarg0) { 6397 mov(dst, c_rarg0); 6398 } 6399 6400 pop(saved_regs, sp); 6401 authenticate_return_address(); 6402 } 6403 6404 void MacroAssembler::cache_wb(Address line) { 6405 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6406 assert(line.index() == noreg, "index should be noreg"); 6407 assert(line.offset() == 0, "offset should be 0"); 6408 // would like to assert this 6409 // assert(line._ext.shift == 0, "shift should be zero"); 6410 if (VM_Version::supports_dcpop()) { 6411 // writeback using clear virtual address to point of persistence 6412 dc(Assembler::CVAP, line.base()); 6413 } else { 6414 // no need to generate anything as Unsafe.writebackMemory should 6415 // never invoke this stub 6416 } 6417 } 6418 6419 void MacroAssembler::cache_wbsync(bool is_pre) { 6420 // we only need a barrier post sync 6421 if (!is_pre) { 6422 membar(Assembler::AnyAny); 6423 } 6424 } 6425 6426 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6427 // Make sure that native code does not change SVE vector length. 
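  // (sve_inc(tmp, B) below adds the number of byte-sized elements in an
  //  SVE vector, i.e. the vector length in bytes, so after movw(tmp, zr)
  //  tmp must match the length captured at startup.)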
6428 if (!UseSVE) return; 6429 Label verify_ok; 6430 movw(tmp, zr); 6431 sve_inc(tmp, B); 6432 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 6433 br(EQ, verify_ok); 6434 stop("Error: SVE vector length has changed since jvm startup"); 6435 bind(verify_ok); 6436 } 6437 6438 void MacroAssembler::verify_ptrue() { 6439 Label verify_ok; 6440 if (!UseSVE) { 6441 return; 6442 } 6443 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 6444 sve_dec(rscratch1, B); 6445 cbz(rscratch1, verify_ok); 6446 stop("Error: the preserved predicate register (p7) elements are not all true"); 6447 bind(verify_ok); 6448 } 6449 6450 void MacroAssembler::safepoint_isb() { 6451 isb(); 6452 #ifndef PRODUCT 6453 if (VerifyCrossModifyFence) { 6454 // Clear the thread state. 6455 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6456 } 6457 #endif 6458 } 6459 6460 #ifndef PRODUCT 6461 void MacroAssembler::verify_cross_modify_fence_not_required() { 6462 if (VerifyCrossModifyFence) { 6463 // Check if thread needs a cross modify fence. 6464 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6465 Label fence_not_required; 6466 cbz(rscratch1, fence_not_required); 6467 // If it does then fail. 6468 lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)); 6469 mov(c_rarg0, rthread); 6470 blr(rscratch1); 6471 bind(fence_not_required); 6472 } 6473 } 6474 #endif 6475 6476 void MacroAssembler::spin_wait() { 6477 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 6478 switch (VM_Version::spin_wait_desc().inst()) { 6479 case SpinWait::NOP: 6480 nop(); 6481 break; 6482 case SpinWait::ISB: 6483 isb(); 6484 break; 6485 case SpinWait::YIELD: 6486 yield(); 6487 break; 6488 default: 6489 ShouldNotReachHere(); 6490 } 6491 } 6492 } 6493 6494 // Stack frame creation/removal 6495 6496 void MacroAssembler::enter(bool strip_ret_addr) { 6497 if (strip_ret_addr) { 6498 // Addresses can only be signed once. If there are multiple nested frames being created 6499 // in the same function, then the return address needs stripping first. 6500 strip_return_address(); 6501 } 6502 protect_return_address(); 6503 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6504 mov(rfp, sp); 6505 } 6506 6507 void MacroAssembler::leave() { 6508 mov(sp, rfp); 6509 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 6510 authenticate_return_address(); 6511 } 6512 6513 // ROP Protection 6514 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/ 6515 // destroying stack frames or whenever directly loading/storing the LR to memory. 6516 // If ROP protection is not set then these functions are no-ops. 6517 // For more details on PAC see pauth_aarch64.hpp. 6518 6519 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory. 6520 // Uses value zero as the modifier. 6521 // 6522 void MacroAssembler::protect_return_address() { 6523 if (VM_Version::use_rop_protection()) { 6524 check_return_address(); 6525 paciaz(); 6526 } 6527 } 6528 6529 // Sign the return value in the given register. Use before updating the LR in the existing stack 6530 // frame for the current function. 6531 // Uses value zero as the modifier. 6532 // 6533 void MacroAssembler::protect_return_address(Register return_reg) { 6534 if (VM_Version::use_rop_protection()) { 6535 check_return_address(return_reg); 6536 paciza(return_reg); 6537 } 6538 } 6539 6540 // Authenticate the LR. 
Use before function return, after restoring FP and loading LR from memory. 6541 // Uses value zero as the modifier. 6542 // 6543 void MacroAssembler::authenticate_return_address() { 6544 if (VM_Version::use_rop_protection()) { 6545 autiaz(); 6546 check_return_address(); 6547 } 6548 } 6549 6550 // Authenticate the return value in the given register. Use before updating the LR in the existing 6551 // stack frame for the current function. 6552 // Uses value zero as the modifier. 6553 // 6554 void MacroAssembler::authenticate_return_address(Register return_reg) { 6555 if (VM_Version::use_rop_protection()) { 6556 autiza(return_reg); 6557 check_return_address(return_reg); 6558 } 6559 } 6560 6561 // Strip any PAC data from LR without performing any authentication. Use with caution - only if 6562 // there is no guaranteed way of authenticating the LR. 6563 // 6564 void MacroAssembler::strip_return_address() { 6565 if (VM_Version::use_rop_protection()) { 6566 xpaclri(); 6567 } 6568 } 6569 6570 #ifndef PRODUCT 6571 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only 6572 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point 6573 // it is difficult to debug back to the callee function. 6574 // This function simply loads from the address in the given register. 6575 // Use directly after authentication to catch authentication failures. 6576 // Also use before signing to check that the pointer is valid and hasn't already been signed. 6577 // 6578 void MacroAssembler::check_return_address(Register return_reg) { 6579 if (VM_Version::use_rop_protection()) { 6580 ldr(zr, Address(return_reg)); 6581 } 6582 } 6583 #endif 6584 6585 // The java_calling_convention describes stack locations as ideal slots on 6586 // a frame with no abi restrictions. Since we must observe abi restrictions 6587 // (like the placement of the saved rfp and lr) the slots must be biased by 6588 // the following value. 6589 static int reg2offset_in(VMReg r) { 6590 // Account for saved rfp and lr 6591 // This should really be in_preserve_stack_slots 6592 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; 6593 } 6594 6595 static int reg2offset_out(VMReg r) { 6596 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 6597 } 6598 6599 // On 64-bit we store integer-like items to the stack as 64-bit items 6600 // (AArch64 ABI), even though Java only needs 32 bits for a parameter. 6601 // On 32-bit it would simply be 32 bits, so this routine does 32->32 6602 // on 32-bit and 32->64 on 64-bit. 6603 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) { 6604 if (src.first()->is_stack()) { 6605 if (dst.first()->is_stack()) { 6606 // stack to stack 6607 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6608 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6609 } else { 6610 // stack to reg 6611 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6612 } 6613 } else if (dst.first()->is_stack()) { 6614 // reg to stack 6615 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6616 } else { 6617 if (dst.first() != src.first()) { 6618 sxtw(dst.first()->as_Register(), src.first()->as_Register()); 6619 } 6620 } 6621 } 6622 6623 // An oop arg.
Must pass a handle, not the oop itself. 6624 void MacroAssembler::object_move( 6625 OopMap* map, 6626 int oop_handle_offset, 6627 int framesize_in_slots, 6628 VMRegPair src, 6629 VMRegPair dst, 6630 bool is_receiver, 6631 int* receiver_offset) { 6632 6633 // Must pass a handle. First figure out the location we use as a handle. 6634 6635 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register(); 6636 6637 // See if the oop is null; if it is, we need no handle 6638 6639 if (src.first()->is_stack()) { 6640 6641 // Oop is already on the stack as an argument 6642 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 6643 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 6644 if (is_receiver) { 6645 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 6646 } 6647 6648 ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 6649 lea(rHandle, Address(rfp, reg2offset_in(src.first()))); 6650 // conditionally move a null 6651 cmp(rscratch1, zr); 6652 csel(rHandle, zr, rHandle, Assembler::EQ); 6653 } else { 6654 6655 // The oop is in a register; we must store it to the space we reserve 6656 // on the stack for oop handles, and pass a handle if the oop is non-null 6657 6658 const Register rOop = src.first()->as_Register(); 6659 int oop_slot; 6660 if (rOop == j_rarg0) 6661 oop_slot = 0; 6662 else if (rOop == j_rarg1) 6663 oop_slot = 1; 6664 else if (rOop == j_rarg2) 6665 oop_slot = 2; 6666 else if (rOop == j_rarg3) 6667 oop_slot = 3; 6668 else if (rOop == j_rarg4) 6669 oop_slot = 4; 6670 else if (rOop == j_rarg5) 6671 oop_slot = 5; 6672 else if (rOop == j_rarg6) 6673 oop_slot = 6; 6674 else { 6675 assert(rOop == j_rarg7, "wrong register"); 6676 oop_slot = 7; 6677 } 6678 6679 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 6680 int offset = oop_slot*VMRegImpl::stack_slot_size; 6681 6682 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 6683 // Store oop in handle area, may be null 6684 str(rOop, Address(sp, offset)); 6685 if (is_receiver) { 6686 *receiver_offset = offset; 6687 } 6688 6689 cmp(rOop, zr); 6690 lea(rHandle, Address(sp, offset)); 6691 // conditionally move a null 6692 csel(rHandle, zr, rHandle, Assembler::EQ); 6693 } 6694 6695 // If the arg is on the stack, store the handle there; otherwise it is already in the correct register.
6696 if (dst.first()->is_stack()) { 6697 str(rHandle, Address(sp, reg2offset_out(dst.first()))); 6698 } 6699 } 6700 6701 // A float arg may have to move through an integer register for a stack-to-stack copy 6702 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) { 6703 if (src.first()->is_stack()) { 6704 if (dst.first()->is_stack()) { 6705 ldrw(tmp, Address(rfp, reg2offset_in(src.first()))); 6706 strw(tmp, Address(sp, reg2offset_out(dst.first()))); 6707 } else { 6708 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6709 } 6710 } else if (src.first() != dst.first()) { 6711 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6712 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6713 else 6714 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6715 } 6716 } 6717 6718 // A long move 6719 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) { 6720 if (src.first()->is_stack()) { 6721 if (dst.first()->is_stack()) { 6722 // stack to stack 6723 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6724 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6725 } else { 6726 // stack to reg 6727 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6728 } 6729 } else if (dst.first()->is_stack()) { 6730 // reg to stack 6731 // Do we really have to sign extend??? 6732 // __ movslq(src.first()->as_Register(), src.first()->as_Register()); 6733 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6734 } else { 6735 if (dst.first() != src.first()) { 6736 mov(dst.first()->as_Register(), src.first()->as_Register()); 6737 } 6738 } 6739 } 6740 6741 6742 // A double move 6743 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) { 6744 if (src.first()->is_stack()) { 6745 if (dst.first()->is_stack()) { 6746 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6747 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6748 } else { 6749 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6750 } 6751 } else if (src.first() != dst.first()) { 6752 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6753 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6754 else 6755 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6756 } 6757 } 6758 6759 // Implements lightweight-locking. 6760 // 6761 // - obj: the object to be locked 6762 // - t1, t2, t3: temporary registers, will be destroyed 6763 // - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding). 6764 void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) { 6765 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); 6766 assert_different_registers(obj, t1, t2, t3, rscratch1); 6767 6768 Label push; 6769 const Register top = t1; 6770 const Register mark = t2; 6771 const Register t = t3; 6772 6773 // Preload the markWord. It is important that this is the first 6774 // instruction emitted as it is part of C1's null check semantics. 6775 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); 6776 6777 // Check if the lock-stack is full. 6778 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6779 cmpw(top, (unsigned)LockStack::end_offset()); 6780 br(Assembler::GE, slow); 6781 6782 // Check for recursion.
6783 subw(t, top, oopSize); 6784 ldr(t, Address(rthread, t)); 6785 cmp(obj, t); 6786 br(Assembler::EQ, push); 6787 6788 // Check header for monitor (0b10). 6789 tst(mark, markWord::monitor_value); 6790 br(Assembler::NE, slow); 6791 6792 // Try to lock. Transition lock bits 0b01 => 0b00 6793 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); 6794 orr(mark, mark, markWord::unlocked_value); 6795 eor(t, mark, markWord::unlocked_value); 6796 cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword, 6797 /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg); 6798 br(Assembler::NE, slow); 6799 6800 bind(push); 6801 // After successful lock, push object on lock-stack. 6802 str(obj, Address(rthread, top)); 6803 addw(top, top, oopSize); 6804 strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6805 } 6806 6807 // Implements lightweight-unlocking. 6808 // 6809 // - obj: the object to be unlocked 6810 // - t1, t2, t3: temporary registers 6811 // - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding). 6812 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) { 6813 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); 6814 // cmpxchg clobbers rscratch1. 6815 assert_different_registers(obj, t1, t2, t3, rscratch1); 6816 6817 #ifdef ASSERT 6818 { 6819 // Check for lock-stack underflow. 6820 Label stack_ok; 6821 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset())); 6822 cmpw(t1, (unsigned)LockStack::start_offset()); 6823 br(Assembler::GE, stack_ok); 6824 STOP("Lock-stack underflow"); 6825 bind(stack_ok); 6826 } 6827 #endif 6828 6829 Label unlocked, push_and_slow; 6830 const Register top = t1; 6831 const Register mark = t2; 6832 const Register t = t3; 6833 6834 // Check if obj is top of lock-stack. 6835 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6836 subw(top, top, oopSize); 6837 ldr(t, Address(rthread, top)); 6838 cmp(obj, t); 6839 br(Assembler::NE, slow); 6840 6841 // Pop lock-stack. 6842 DEBUG_ONLY(str(zr, Address(rthread, top));) 6843 strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6844 6845 // Check if recursive. 6846 subw(t, top, oopSize); 6847 ldr(t, Address(rthread, t)); 6848 cmp(obj, t); 6849 br(Assembler::EQ, unlocked); 6850 6851 // Not recursive. Check header for monitor (0b10). 6852 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); 6853 tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow); 6854 6855 #ifdef ASSERT 6856 // Check header not unlocked (0b01). 6857 Label not_unlocked; 6858 tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked); 6859 stop("lightweight_unlock already unlocked"); 6860 bind(not_unlocked); 6861 #endif 6862 6863 // Try to unlock. Transition lock bits 0b00 => 0b01 6864 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); 6865 orr(t, mark, markWord::unlocked_value); 6866 cmpxchg(obj, mark, t, Assembler::xword, 6867 /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); 6868 br(Assembler::EQ, unlocked); 6869 6870 bind(push_and_slow); 6871 // Restore lock-stack and handle the unlock in the runtime. 6872 DEBUG_ONLY(str(obj, Address(rthread, top));) 6873 addw(top, top, oopSize); 6874 strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6875 b(slow); 6876 6877 bind(unlocked); 6878 }
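
// A sketch of the per-thread lock-stack protocol implemented by the two
// functions above (illustrative pseudocode only):
//   lock(obj):   stack[top] = obj; top += oopSize;   // push
//   unlock(obj): top -= oopSize;                     // pop; stack[top] must equal obj
// Recursion is handled by comparing obj against the entry below the top of
// the stack, and any object whose mark word points at an inflated monitor
// (lock bits 0b10) is handed to the slow path.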