/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//    1a)  00___x Unconditional branch (immediate)
//    1b)  01___0 Compare & branch (immediate)
//    1c)  01___1 Test & branch (immediate)
//    1d)  10___0 Conditional branch (immediate)
//         other  Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//    2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//    2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                 strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//    3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//    3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                  strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//

class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
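    // An adrp-based sequence splits the target between two instructions:
    // the ADRP carries a signed 21-bit page (4 KB) delta encoded as immlo
    // (bits 30:29) and immhi (bits 23:5), while the following LDR/ADD/MOVK
    // carries the low-order part. The inner reloc patches that second
    // instruction and may rewrite `target` (see adrpMovk_impl below), so
    // the page delta computed here uses the adjusted target.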
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
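// For example (illustrative only):
//   adrp x3, <page>; ldr x2, [x3, #24]  -> byte_offset == 24 (the encoded
//                                           imm12 is rescaled by the access size)
//   adrp x3, <page>; add x3, x3, #24    -> byte_offset == 24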
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

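// The decoder is the read-only counterpart of the Patcher above: it walks
// the same instruction forms but recovers the current target address
// instead of rewriting it.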
class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

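// Reads the per-thread polling word and either compares the stack pointer
// against it (the stack-watermark check on method return) or tests the poll
// bit, branching to slow_path when a safepoint/handshake is pending.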
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
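// (Three overloads of set_last_Java_frame follow: one taking the PC in a
// register, one taking a fixed code address, and one taking a Label that
// may still be unbound.)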
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

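// Like far_call, but emits a jump rather than a call and returns the
// number of bytes of code it emitted.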
int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

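// Central helper behind the call_VM family: records the last Java frame,
// passes the current thread as the first C argument, makes the call,
// reloads lr, clears the frame anchor, and (optionally) forwards any
// pending exception and fetches the oop result.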
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check whether the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
          !CodeCache::find_blob(target)->is_nmethod(),
          "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)
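//
// Concretely, the stub emitted below is:
//   ldr rscratch1, <pc + 8>   // load the 64-bit destination that follows
//   br  rscratch1
//   <destination address, 8 bytes>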

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}

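// Emit the inline-cache check performed at an nmethod's unverified entry
// point: load the receiver's klass, compare it with the speculated klass
// from CompiledICData, and jump to the IC-miss stub on a mismatch.
// Returns the offset of the unverified entry point (UEP).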
int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions
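//
// These all funnel into call_VM_helper/call_VM_base; Java arguments are
// marshalled into c_rarg1..c_rarg3 (c_rarg0 is reserved for the current
// thread), which is why the assert_different_registers checks below guard
// against an argument clobbering a later c_rarg slot.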

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null.  A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
    ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
    cmp(holder_klass, temp_itbl_klass);
    br(Assembler::EQ, L_holder_found);
    cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
    ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
    cbz(temp_itbl_klass, L_no_such_interface);
    cmp(resolved_klass, temp_itbl_klass);
    br(Assembler::EQ, L_resolved_found);
    cmp(holder_klass, temp_itbl_klass);
    br(Assembler::NE, L_loop_search_resolved);
    mov(holder_offset, scan_temp);
    b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
    - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
1420 // We move this check to the front of the fast path because many 1421 // type checks are in fact trivially successful in this manner, 1422 // so we get a nicely predicted branch right at the start of the check. 1423 cmp(sub_klass, super_klass); 1424 br(Assembler::EQ, *L_success); 1425 1426 // Check the supertype display: 1427 if (must_load_sco) { 1428 ldrw(temp_reg, super_check_offset_addr); 1429 super_check_offset = RegisterOrConstant(temp_reg); 1430 } 1431 Address super_check_addr(sub_klass, super_check_offset); 1432 ldr(rscratch1, super_check_addr); 1433 cmp(super_klass, rscratch1); // load displayed supertype 1434 1435 // This check has worked decisively for primary supers. 1436 // Secondary supers are sought in the super_cache ('super_cache_addr'). 1437 // (Secondary supers are interfaces and very deeply nested subtypes.) 1438 // This works in the same check above because of a tricky aliasing 1439 // between the super_cache and the primary super display elements. 1440 // (The 'super_check_addr' can address either, as the case requires.) 1441 // Note that the cache is updated below if it does not help us find 1442 // what we need immediately. 1443 // So if it was a primary super, we can just fail immediately. 1444 // Otherwise, it's the slow path for us (no success at this point). 1445 1446 if (super_check_offset.is_register()) { 1447 br(Assembler::EQ, *L_success); 1448 subs(zr, super_check_offset.as_register(), sc_offset); 1449 if (L_failure == &L_fallthrough) { 1450 br(Assembler::EQ, *L_slow_path); 1451 } else { 1452 br(Assembler::NE, *L_failure); 1453 final_jmp(*L_slow_path); 1454 } 1455 } else if (super_check_offset.as_constant() == sc_offset) { 1456 // Need a slow path; fast failure is impossible. 1457 if (L_slow_path == &L_fallthrough) { 1458 br(Assembler::EQ, *L_success); 1459 } else { 1460 br(Assembler::NE, *L_slow_path); 1461 final_jmp(*L_success); 1462 } 1463 } else { 1464 // No slow path; it's a fast decision. 1465 if (L_failure == &L_fallthrough) { 1466 br(Assembler::EQ, *L_success); 1467 } else { 1468 br(Assembler::NE, *L_failure); 1469 final_jmp(*L_success); 1470 } 1471 } 1472 1473 bind(L_fallthrough); 1474 1475 #undef final_jmp 1476 } 1477 1478 // These two are taken from x86, but they look generally useful 1479 1480 // scans count pointer sized words at [addr] for occurrence of value, 1481 // generic 1482 void MacroAssembler::repne_scan(Register addr, Register value, Register count, 1483 Register scratch) { 1484 Label Lloop, Lexit; 1485 cbz(count, Lexit); 1486 bind(Lloop); 1487 ldr(scratch, post(addr, wordSize)); 1488 cmp(value, scratch); 1489 br(EQ, Lexit); 1490 sub(count, count, 1); 1491 cbnz(count, Lloop); 1492 bind(Lexit); 1493 } 1494 1495 // scans count 4 byte words at [addr] for occurrence of value, 1496 // generic 1497 void MacroAssembler::repne_scanw(Register addr, Register value, Register count, 1498 Register scratch) { 1499 Label Lloop, Lexit; 1500 cbz(count, Lexit); 1501 bind(Lloop); 1502 ldrw(scratch, post(addr, wordSize)); 1503 cmpw(value, scratch); 1504 br(EQ, Lexit); 1505 sub(count, count, 1); 1506 cbnz(count, Lloop); 1507 bind(Lexit); 1508 } 1509 1510 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 1511 Register super_klass, 1512 Register temp_reg, 1513 Register temp2_reg, 1514 Label* L_success, 1515 Label* L_failure, 1516 bool set_cond_codes) { 1517 // NB! Callers may assume that, when temp2_reg is a valid register, 1518 // this code sets it to a nonzero value. 
1519 1520 assert_different_registers(sub_klass, super_klass, temp_reg); 1521 if (temp2_reg != noreg) 1522 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1523 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1524 1525 Label L_fallthrough; 1526 int label_nulls = 0; 1527 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1528 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1529 assert(label_nulls <= 1, "at most one null in the batch"); 1530 1531 // a couple of useful fields in sub_klass: 1532 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1533 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1534 Address secondary_supers_addr(sub_klass, ss_offset); 1535 Address super_cache_addr( sub_klass, sc_offset); 1536 1537 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1538 1539 // Do a linear scan of the secondary super-klass chain. 1540 // This code is rarely used, so simplicity is a virtue here. 1541 // The repne_scan instruction uses fixed registers, which we must spill. 1542 // Don't worry too much about pre-existing connections with the input regs. 1543 1544 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1545 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1546 1547 RegSet pushed_registers; 1548 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1549 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1550 1551 if (super_klass != r0) { 1552 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1553 } 1554 1555 push(pushed_registers, sp); 1556 1557 // Get super_klass value into r0 (even if it was in r5 or r2). 1558 if (super_klass != r0) { 1559 mov(r0, super_klass); 1560 } 1561 1562 #ifndef PRODUCT 1563 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1564 #endif //PRODUCT 1565 1566 // We will consult the secondary-super array. 1567 ldr(r5, secondary_supers_addr); 1568 // Load the array length. 1569 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1570 // Skip to start of data. 1571 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1572 1573 cmp(sp, zr); // Clear Z flag; SP is never zero 1574 // Scan R2 words at [R5] for an occurrence of R0. 1575 // Set NZ/Z based on last compare. 1576 repne_scan(r5, r0, r2, rscratch1); 1577 1578 // Unspill the temp. registers: 1579 pop(pushed_registers, sp); 1580 1581 br(Assembler::NE, *L_failure); 1582 1583 // Success. Cache the super we found and proceed in triumph. 1584 str(super_klass, super_cache_addr); 1585 1586 if (L_success != &L_fallthrough) { 1587 b(*L_success); 1588 } 1589 1590 #undef IS_A_TEMP 1591 1592 bind(L_fallthrough); 1593 } 1594 1595 // Ensure that the inline code and the stub are using the same registers. 
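// r0..r5 and rscratch2 are assigned to match aarch64.ad, so that the inline
// fast path, the slow-path stub and the code generated from the ad file all
// agree on register usage; the macro below asserts that assignment.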
1596 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1597 do { \ 1598 assert(r_super_klass == r0 && \ 1599 r_array_base == r1 && \ 1600 r_array_length == r2 && \ 1601 (r_array_index == r3 || r_array_index == noreg) && \ 1602 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1603 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1604 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1605 } while(0) 1606 1607 // Return true: we succeeded in generating this code 1608 bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, 1609 Register r_super_klass, 1610 Register temp1, 1611 Register temp2, 1612 Register temp3, 1613 FloatRegister vtemp, 1614 Register result, 1615 u1 super_klass_slot, 1616 bool stub_is_near) { 1617 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1618 1619 Label L_fallthrough; 1620 1621 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1622 1623 const Register 1624 r_array_base = temp1, // r1 1625 r_array_length = temp2, // r2 1626 r_array_index = temp3, // r3 1627 r_bitmap = rscratch2; 1628 1629 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1630 1631 u1 bit = super_klass_slot; 1632 1633 // Make sure that result is nonzero if the TBZ below misses. 1634 mov(result, 1); 1635 1636 // We're going to need the bitmap in a vector reg and in a core reg, 1637 // so load both now. 1638 ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset())); 1639 if (bit != 0) { 1640 ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset())); 1641 } 1642 // First check the bitmap to see if super_klass might be present. If 1643 // the bit is zero, we are certain that super_klass is not one of 1644 // the secondary supers. 1645 tbz(r_bitmap, bit, L_fallthrough); 1646 1647 // Get the first array index that can contain super_klass into r_array_index. 1648 if (bit != 0) { 1649 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1650 cnt(vtemp, T8B, vtemp); 1651 addv(vtemp, T8B, vtemp); 1652 fmovd(r_array_index, vtemp); 1653 } else { 1654 mov(r_array_index, (u1)1); 1655 } 1656 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1657 1658 // We will consult the secondary-super array. 1659 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1660 1661 // The value i in r_array_index is >= 1, so even though r_array_base 1662 // points to the length, we don't need to adjust it to point to the 1663 // data. 1664 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1665 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1666 1667 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1668 eor(result, result, r_super_klass); 1669 cbz(result, L_fallthrough); // Found a match 1670 1671 // Is there another entry to check? Consult the bitmap. 1672 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1673 1674 // Linear probe. 1675 if (bit != 0) { 1676 ror(r_bitmap, r_bitmap, bit); 1677 } 1678 1679 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1680 // The next slot to be inspected, by the stub we're about to call, 1681 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1682 // have been checked. 
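// Hand the probe over to the shared slow-path stub. When the stub is known
// to be within branch range we emit a direct bl; otherwise we go via a
// trampoline, whose allocation can fail, in which case we report failure to
// the caller.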
1683 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()); 1684 if (stub_is_near) { 1685 bl(stub); 1686 } else { 1687 address call = trampoline_call(stub); 1688 if (call == nullptr) { 1689 return false; // trampoline allocation failed 1690 } 1691 } 1692 1693 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1694 1695 bind(L_fallthrough); 1696 1697 if (VerifySecondarySupers) { 1698 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1699 temp1, temp2, result); // r1, r2, r5 1700 } 1701 return true; 1702 } 1703 1704 // Called by code generated by check_klass_subtype_slow_path 1705 // above. This is called when there is a collision in the hashed 1706 // lookup in the secondary supers array. 1707 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 1708 Register r_array_base, 1709 Register r_array_index, 1710 Register r_bitmap, 1711 Register temp1, 1712 Register result) { 1713 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1); 1714 1715 const Register 1716 r_array_length = temp1, 1717 r_sub_klass = noreg; // unused 1718 1719 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1720 1721 Label L_fallthrough, L_huge; 1722 1723 // Load the array length. 1724 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1725 // And adjust the array base to point to the data. 1726 // NB! Effectively increments current slot index by 1. 1727 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 1728 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1729 1730 // The bitmap is full to bursting. 1731 // Implicit invariant: BITMAP_FULL implies (length > 0) 1732 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); 1733 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2)); 1734 br(GT, L_huge); 1735 1736 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 1737 // current slot (at secondary_supers[r_array_index]) has not yet 1738 // been inspected, and r_array_index may be out of bounds if we 1739 // wrapped around the end of the array. 1740 1741 { // This is conventional linear probing, but instead of terminating 1742 // when a null entry is found in the table, we maintain a bitmap 1743 // in which a 0 indicates missing entries. 1744 // The check above guarantees there are 0s in the bitmap, so the loop 1745 // eventually terminates. 1746 Label L_loop; 1747 bind(L_loop); 1748 1749 // Check for wraparound. 1750 cmp(r_array_index, r_array_length); 1751 csel(r_array_index, zr, r_array_index, GE); 1752 1753 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1754 eor(result, rscratch1, r_super_klass); 1755 cbz(result, L_fallthrough); 1756 1757 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero 1758 1759 ror(r_bitmap, r_bitmap, 1); 1760 add(r_array_index, r_array_index, 1); 1761 b(L_loop); 1762 } 1763 1764 { // Degenerate case: more than 64 secondary supers. 1765 // FIXME: We could do something smarter here, maybe a vectorized 1766 // comparison or a binary search, but is that worth any added 1767 // complexity? 1768 bind(L_huge); 1769 cmp(sp, zr); // Clear Z flag; SP is never zero 1770 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1); 1771 cset(result, NE); // result == 0 iff we got a match. 1772 } 1773 1774 bind(L_fallthrough); 1775 } 1776 1777 // Make sure that the hashed lookup and a linear scan agree. 
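// On entry, result holds the outcome of the hashed lookup (zero means a
// hit). We redo the search with a plain repne_scan and trap if the two
// answers disagree.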
1778 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 1779 Register r_super_klass, 1780 Register temp1, 1781 Register temp2, 1782 Register result) { 1783 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 1784 1785 const Register 1786 r_array_base = temp1, 1787 r_array_length = temp2, 1788 r_array_index = noreg, // unused 1789 r_bitmap = noreg; // unused 1790 1791 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1792 1793 BLOCK_COMMENT("verify_secondary_supers_table {"); 1794 1795 // We will consult the secondary-super array. 1796 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1797 1798 // Load the array length. 1799 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1800 // And adjust the array base to point to the data. 1801 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1802 1803 cmp(sp, zr); // Clear Z flag; SP is never zero 1804 // Scan R2 words at [R5] for an occurrence of R0. 1805 // Set NZ/Z based on last compare. 1806 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 1807 // rscratch1 == 0 iff we got a match. 1808 cset(rscratch1, NE); 1809 1810 Label passed; 1811 cmp(result, zr); 1812 cset(result, NE); // normalize result to 0/1 for comparison 1813 1814 cmp(rscratch1, result); 1815 br(EQ, passed); 1816 { 1817 mov(r0, r_super_klass); // r0 <- r0 1818 mov(r1, r_sub_klass); // r1 <- r4 1819 mov(r2, /*expected*/rscratch1); // r2 <- r8 1820 mov(r3, result); // r3 <- r5 1821 mov(r4, (address)("mismatch")); // r4 <- const 1822 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 1823 should_not_reach_here(); 1824 } 1825 bind(passed); 1826 1827 BLOCK_COMMENT("} verify_secondary_supers_table"); 1828 } 1829 1830 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1831 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1832 assert_different_registers(klass, rthread, scratch); 1833 1834 Label L_fallthrough, L_tmp; 1835 if (L_fast_path == nullptr) { 1836 L_fast_path = &L_fallthrough; 1837 } else if (L_slow_path == nullptr) { 1838 L_slow_path = &L_fallthrough; 1839 } 1840 // Fast path check: class is fully initialized 1841 lea(scratch, Address(klass, InstanceKlass::init_state_offset())); 1842 ldarb(scratch, scratch); 1843 subs(zr, scratch, InstanceKlass::fully_initialized); 1844 br(Assembler::EQ, *L_fast_path); 1845 1846 // Fast path check: current thread is initializer thread 1847 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1848 cmp(rthread, scratch); 1849 1850 if (L_slow_path == &L_fallthrough) { 1851 br(Assembler::EQ, *L_fast_path); 1852 bind(*L_slow_path); 1853 } else if (L_fast_path == &L_fallthrough) { 1854 br(Assembler::NE, *L_slow_path); 1855 bind(*L_fast_path); 1856 } else { 1857 Unimplemented(); 1858 } 1859 } 1860 1861 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1862 if (!VerifyOops) return; 1863 1864 // Pass register number to verify_oop_subroutine 1865 const char* b = nullptr; 1866 { 1867 ResourceMark rm; 1868 stringStream ss; 1869 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1870 b = code_string(ss.as_string()); 1871 } 1872 BLOCK_COMMENT("verify_oop {"); 1873 1874 strip_return_address(); // This might happen within a stack frame. 
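// Preserve r0, rscratch1, rscratch2 and lr across the call to the
// verify_oop subroutine; they are restored below before we return.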
1875 protect_return_address(); 1876 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1877 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1878 1879 mov(r0, reg); 1880 movptr(rscratch1, (uintptr_t)(address)b); 1881 1882 // call indirectly to solve generation ordering problem 1883 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1884 ldr(rscratch2, Address(rscratch2)); 1885 blr(rscratch2); 1886 1887 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1888 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1889 authenticate_return_address(); 1890 1891 BLOCK_COMMENT("} verify_oop"); 1892 } 1893 1894 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1895 if (!VerifyOops) return; 1896 1897 const char* b = nullptr; 1898 { 1899 ResourceMark rm; 1900 stringStream ss; 1901 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1902 b = code_string(ss.as_string()); 1903 } 1904 BLOCK_COMMENT("verify_oop_addr {"); 1905 1906 strip_return_address(); // This might happen within a stack frame. 1907 protect_return_address(); 1908 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1909 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1910 1911 // addr may contain sp so we will have to adjust it based on the 1912 // pushes that we just did. 1913 if (addr.uses(sp)) { 1914 lea(r0, addr); 1915 ldr(r0, Address(r0, 4 * wordSize)); 1916 } else { 1917 ldr(r0, addr); 1918 } 1919 movptr(rscratch1, (uintptr_t)(address)b); 1920 1921 // call indirectly to solve generation ordering problem 1922 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1923 ldr(rscratch2, Address(rscratch2)); 1924 blr(rscratch2); 1925 1926 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1927 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1928 authenticate_return_address(); 1929 1930 BLOCK_COMMENT("} verify_oop_addr"); 1931 } 1932 1933 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1934 int extra_slot_offset) { 1935 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
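// Compute the address of interpreter expression-stack slot arg_slot (plus
// extra_slot_offset slots): a constant offset from esp when the slot index
// is a constant, otherwise formed in rscratch1.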
1936 int stackElementSize = Interpreter::stackElementSize; 1937 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1938 #ifdef ASSERT 1939 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1940 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1941 #endif 1942 if (arg_slot.is_constant()) { 1943 return Address(esp, arg_slot.as_constant() * stackElementSize 1944 + offset); 1945 } else { 1946 add(rscratch1, esp, arg_slot.as_register(), 1947 ext::uxtx, exact_log2(stackElementSize)); 1948 return Address(rscratch1, offset); 1949 } 1950 } 1951 1952 void MacroAssembler::call_VM_leaf_base(address entry_point, 1953 int number_of_arguments, 1954 Label *retaddr) { 1955 Label E, L; 1956 1957 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1958 1959 mov(rscratch1, entry_point); 1960 blr(rscratch1); 1961 if (retaddr) 1962 bind(*retaddr); 1963 1964 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1965 } 1966 1967 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1968 call_VM_leaf_base(entry_point, number_of_arguments); 1969 } 1970 1971 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1972 pass_arg0(this, arg_0); 1973 call_VM_leaf_base(entry_point, 1); 1974 } 1975 1976 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1977 assert_different_registers(arg_1, c_rarg0); 1978 pass_arg0(this, arg_0); 1979 pass_arg1(this, arg_1); 1980 call_VM_leaf_base(entry_point, 2); 1981 } 1982 1983 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 1984 Register arg_1, Register arg_2) { 1985 assert_different_registers(arg_1, c_rarg0); 1986 assert_different_registers(arg_2, c_rarg0, c_rarg1); 1987 pass_arg0(this, arg_0); 1988 pass_arg1(this, arg_1); 1989 pass_arg2(this, arg_2); 1990 call_VM_leaf_base(entry_point, 3); 1991 } 1992 1993 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1994 pass_arg0(this, arg_0); 1995 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1996 } 1997 1998 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1999 2000 assert_different_registers(arg_0, c_rarg1); 2001 pass_arg1(this, arg_1); 2002 pass_arg0(this, arg_0); 2003 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2004 } 2005 2006 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2007 assert_different_registers(arg_0, c_rarg1, c_rarg2); 2008 assert_different_registers(arg_1, c_rarg2); 2009 pass_arg2(this, arg_2); 2010 pass_arg1(this, arg_1); 2011 pass_arg0(this, arg_0); 2012 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2013 } 2014 2015 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2016 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2017 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2018 assert_different_registers(arg_2, c_rarg3); 2019 pass_arg3(this, arg_3); 2020 pass_arg2(this, arg_2); 2021 pass_arg1(this, arg_1); 2022 pass_arg0(this, arg_0); 2023 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2024 } 2025 2026 void MacroAssembler::null_check(Register reg, int offset) { 2027 if (needs_explicit_null_check(offset)) { 2028 // provoke OS null exception if reg is null by 2029 // accessing M[reg] w/o changing any registers 2030 // NOTE: this is plenty to provoke a segv 2031 ldr(zr, Address(reg)); 2032 } else { 2033 // 
nothing to do, (later) access of M[reg + offset] 2034 // will provoke OS null exception if reg is null 2035 } 2036 } 2037 2038 // MacroAssembler protected routines needed to implement 2039 // public methods 2040 2041 void MacroAssembler::mov(Register r, Address dest) { 2042 code_section()->relocate(pc(), dest.rspec()); 2043 uint64_t imm64 = (uint64_t)dest.target(); 2044 movptr(r, imm64); 2045 } 2046 2047 // Move a constant pointer into r. In AArch64 mode the virtual 2048 // address space is 48 bits in size, so we only need three 2049 // instructions to create a patchable instruction sequence that can 2050 // reach anywhere. 2051 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2052 #ifndef PRODUCT 2053 { 2054 char buffer[64]; 2055 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2056 block_comment(buffer); 2057 } 2058 #endif 2059 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2060 movz(r, imm64 & 0xffff); 2061 imm64 >>= 16; 2062 movk(r, imm64 & 0xffff, 16); 2063 imm64 >>= 16; 2064 movk(r, imm64 & 0xffff, 32); 2065 } 2066 2067 // Macro to mov replicated immediate to vector register. 2068 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2069 // the upper 56/48/32 bits must be zeros for B/H/S type. 2070 // Vd will get the following values for different arrangements in T 2071 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2072 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2073 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2074 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2075 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2076 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2077 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2078 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2079 // Clobbers rscratch1 2080 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2081 assert(T != T1Q, "unsupported"); 2082 if (T == T1D || T == T2D) { 2083 int imm = operand_valid_for_movi_immediate(imm64, T); 2084 if (-1 != imm) { 2085 movi(Vd, T, imm); 2086 } else { 2087 mov(rscratch1, imm64); 2088 dup(Vd, T, rscratch1); 2089 } 2090 return; 2091 } 2092 2093 #ifdef ASSERT 2094 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2095 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2096 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2097 #endif 2098 int shift = operand_valid_for_movi_immediate(imm64, T); 2099 uint32_t imm32 = imm64 & 0xffffffffULL; 2100 if (shift >= 0) { 2101 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2102 } else { 2103 movw(rscratch1, imm32); 2104 dup(Vd, T, rscratch1); 2105 } 2106 } 2107 2108 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2109 { 2110 #ifndef PRODUCT 2111 { 2112 char buffer[64]; 2113 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2114 block_comment(buffer); 2115 } 2116 #endif 2117 if (operand_valid_for_logical_immediate(false, imm64)) { 2118 orr(dst, zr, imm64); 2119 } else { 2120 // we can use a combination of MOVZ or MOVN with 2121 // MOVK to build up the constant 2122 uint64_t imm_h[4]; 2123 int zero_count = 0; 2124 int neg_count = 0; 2125 int i; 2126 for (i = 0; i < 4; i++) { 2127 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2128 if (imm_h[i] == 0) { 2129 zero_count++; 2130 } else if (imm_h[i] == 0xffffL) { 2131 neg_count++; 
2132 } 2133 } 2134 if (zero_count == 4) { 2135 // one MOVZ will do 2136 movz(dst, 0); 2137 } else if (neg_count == 4) { 2138 // one MOVN will do 2139 movn(dst, 0); 2140 } else if (zero_count == 3) { 2141 for (i = 0; i < 4; i++) { 2142 if (imm_h[i] != 0L) { 2143 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2144 break; 2145 } 2146 } 2147 } else if (neg_count == 3) { 2148 // one MOVN will do 2149 for (int i = 0; i < 4; i++) { 2150 if (imm_h[i] != 0xffffL) { 2151 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2152 break; 2153 } 2154 } 2155 } else if (zero_count == 2) { 2156 // one MOVZ and one MOVK will do 2157 for (i = 0; i < 3; i++) { 2158 if (imm_h[i] != 0L) { 2159 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2160 i++; 2161 break; 2162 } 2163 } 2164 for (;i < 4; i++) { 2165 if (imm_h[i] != 0L) { 2166 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2167 } 2168 } 2169 } else if (neg_count == 2) { 2170 // one MOVN and one MOVK will do 2171 for (i = 0; i < 4; i++) { 2172 if (imm_h[i] != 0xffffL) { 2173 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2174 i++; 2175 break; 2176 } 2177 } 2178 for (;i < 4; i++) { 2179 if (imm_h[i] != 0xffffL) { 2180 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2181 } 2182 } 2183 } else if (zero_count == 1) { 2184 // one MOVZ and two MOVKs will do 2185 for (i = 0; i < 4; i++) { 2186 if (imm_h[i] != 0L) { 2187 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2188 i++; 2189 break; 2190 } 2191 } 2192 for (;i < 4; i++) { 2193 if (imm_h[i] != 0x0L) { 2194 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2195 } 2196 } 2197 } else if (neg_count == 1) { 2198 // one MOVN and two MOVKs will do 2199 for (i = 0; i < 4; i++) { 2200 if (imm_h[i] != 0xffffL) { 2201 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2202 i++; 2203 break; 2204 } 2205 } 2206 for (;i < 4; i++) { 2207 if (imm_h[i] != 0xffffL) { 2208 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2209 } 2210 } 2211 } else { 2212 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2213 movz(dst, (uint32_t)imm_h[0], 0); 2214 for (i = 1; i < 4; i++) { 2215 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2216 } 2217 } 2218 } 2219 } 2220 2221 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2222 { 2223 #ifndef PRODUCT 2224 { 2225 char buffer[64]; 2226 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2227 block_comment(buffer); 2228 } 2229 #endif 2230 if (operand_valid_for_logical_immediate(true, imm32)) { 2231 orrw(dst, zr, imm32); 2232 } else { 2233 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2234 // constant 2235 uint32_t imm_h[2]; 2236 imm_h[0] = imm32 & 0xffff; 2237 imm_h[1] = ((imm32 >> 16) & 0xffff); 2238 if (imm_h[0] == 0) { 2239 movzw(dst, imm_h[1], 16); 2240 } else if (imm_h[0] == 0xffff) { 2241 movnw(dst, imm_h[1] ^ 0xffff, 16); 2242 } else if (imm_h[1] == 0) { 2243 movzw(dst, imm_h[0], 0); 2244 } else if (imm_h[1] == 0xffff) { 2245 movnw(dst, imm_h[0] ^ 0xffff, 0); 2246 } else { 2247 // use a MOVZ and MOVK (makes it easier to debug) 2248 movzw(dst, imm_h[0], 0); 2249 movkw(dst, imm_h[1], 16); 2250 } 2251 } 2252 } 2253 2254 // Form an address from base + offset in Rd. Rd may or may 2255 // not actually be used: you must use the Address that is returned. 2256 // It is up to you to ensure that the shift provided matches the size 2257 // of your data. 
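// A typical use is the constant-index case of lookup_virtual_method above:
// form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0).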
2258 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2259 if (Address::offset_ok_for_immed(byte_offset, shift)) 2260 // It fits; no need for any heroics 2261 return Address(base, byte_offset); 2262 2263 // Don't do anything clever with negative or misaligned offsets 2264 unsigned mask = (1 << shift) - 1; 2265 if (byte_offset < 0 || byte_offset & mask) { 2266 mov(Rd, byte_offset); 2267 add(Rd, base, Rd); 2268 return Address(Rd); 2269 } 2270 2271 // See if we can do this with two 12-bit offsets 2272 { 2273 uint64_t word_offset = byte_offset >> shift; 2274 uint64_t masked_offset = word_offset & 0xfff000; 2275 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2276 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2277 add(Rd, base, masked_offset << shift); 2278 word_offset -= masked_offset; 2279 return Address(Rd, word_offset << shift); 2280 } 2281 } 2282 2283 // Do it the hard way 2284 mov(Rd, byte_offset); 2285 add(Rd, base, Rd); 2286 return Address(Rd); 2287 } 2288 2289 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2290 bool want_remainder, Register scratch) 2291 { 2292 // Full implementation of Java idiv and irem. The function 2293 // returns the (pc) offset of the div instruction - may be needed 2294 // for implicit exceptions. 2295 // 2296 // constraint : ra/rb =/= scratch 2297 // normal case 2298 // 2299 // input : ra: dividend 2300 // rb: divisor 2301 // 2302 // result: either 2303 // quotient (= ra idiv rb) 2304 // remainder (= ra irem rb) 2305 2306 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2307 2308 int idivl_offset = offset(); 2309 if (! want_remainder) { 2310 sdivw(result, ra, rb); 2311 } else { 2312 sdivw(scratch, ra, rb); 2313 Assembler::msubw(result, scratch, rb, ra); 2314 } 2315 2316 return idivl_offset; 2317 } 2318 2319 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2320 bool want_remainder, Register scratch) 2321 { 2322 // Full implementation of Java ldiv and lrem. The function 2323 // returns the (pc) offset of the div instruction - may be needed 2324 // for implicit exceptions. 2325 // 2326 // constraint : ra/rb =/= scratch 2327 // normal case 2328 // 2329 // input : ra: dividend 2330 // rb: divisor 2331 // 2332 // result: either 2333 // quotient (= ra idiv rb) 2334 // remainder (= ra irem rb) 2335 2336 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2337 2338 int idivq_offset = offset(); 2339 if (! want_remainder) { 2340 sdiv(result, ra, rb); 2341 } else { 2342 sdiv(scratch, ra, rb); 2343 Assembler::msub(result, scratch, rb, ra); 2344 } 2345 2346 return idivq_offset; 2347 } 2348 2349 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2350 address prev = pc() - NativeMembar::instruction_size; 2351 address last = code()->last_insn(); 2352 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2353 NativeMembar *bar = NativeMembar_at(prev); 2354 if (AlwaysMergeDMB) { 2355 bar->set_kind(bar->get_kind() | order_constraint); 2356 BLOCK_COMMENT("merged membar(always)"); 2357 return; 2358 } 2359 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because 2360 // doing so would introduce a StoreLoad which the caller did not 2361 // intend 2362 if (bar->get_kind() == order_constraint 2363 || bar->get_kind() == AnyAny 2364 || order_constraint == AnyAny) { 2365 // We are merging two memory barrier instructions. 
On AArch64 we 2366 // can do this simply by ORing them together. 2367 bar->set_kind(bar->get_kind() | order_constraint); 2368 BLOCK_COMMENT("merged membar"); 2369 return; 2370 } else { 2371 // A special case like "DMB ST;DMB LD;DMB ST", the last DMB can be skipped 2372 // We need check the last 2 instructions 2373 address prev2 = prev - NativeMembar::instruction_size; 2374 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) { 2375 NativeMembar *bar2 = NativeMembar_at(prev2); 2376 assert(bar2->get_kind() == order_constraint, "it should be merged before"); 2377 BLOCK_COMMENT("merged membar(elided)"); 2378 return; 2379 } 2380 } 2381 } 2382 code()->set_last_insn(pc()); 2383 dmb(Assembler::barrier(order_constraint)); 2384 } 2385 2386 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 2387 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 2388 merge_ldst(rt, adr, size_in_bytes, is_store); 2389 code()->clear_last_insn(); 2390 return true; 2391 } else { 2392 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 2393 const uint64_t mask = size_in_bytes - 1; 2394 if (adr.getMode() == Address::base_plus_offset && 2395 (adr.offset() & mask) == 0) { // only supports base_plus_offset. 2396 code()->set_last_insn(pc()); 2397 } 2398 return false; 2399 } 2400 } 2401 2402 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2403 // We always try to merge two adjacent loads into one ldp. 2404 if (!try_merge_ldst(Rx, adr, 8, false)) { 2405 Assembler::ldr(Rx, adr); 2406 } 2407 } 2408 2409 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2410 // We always try to merge two adjacent loads into one ldp. 2411 if (!try_merge_ldst(Rw, adr, 4, false)) { 2412 Assembler::ldrw(Rw, adr); 2413 } 2414 } 2415 2416 void MacroAssembler::str(Register Rx, const Address &adr) { 2417 // We always try to merge two adjacent stores into one stp. 2418 if (!try_merge_ldst(Rx, adr, 8, true)) { 2419 Assembler::str(Rx, adr); 2420 } 2421 } 2422 2423 void MacroAssembler::strw(Register Rw, const Address &adr) { 2424 // We always try to merge two adjacent stores into one stp. 2425 if (!try_merge_ldst(Rw, adr, 4, true)) { 2426 Assembler::strw(Rw, adr); 2427 } 2428 } 2429 2430 // MacroAssembler routines found actually to be needed 2431 2432 void MacroAssembler::push(Register src) 2433 { 2434 str(src, Address(pre(esp, -1 * wordSize))); 2435 } 2436 2437 void MacroAssembler::pop(Register dst) 2438 { 2439 ldr(dst, Address(post(esp, 1 * wordSize))); 2440 } 2441 2442 // Note: load_unsigned_short used to be called load_unsigned_word. 
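// Each of the loaders below returns the code offset of the load instruction
// so that callers can record it, e.g. for implicit null-check bookkeeping.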
2443 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2444 int off = offset(); 2445 ldrh(dst, src); 2446 return off; 2447 } 2448 2449 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2450 int off = offset(); 2451 ldrb(dst, src); 2452 return off; 2453 } 2454 2455 int MacroAssembler::load_signed_short(Register dst, Address src) { 2456 int off = offset(); 2457 ldrsh(dst, src); 2458 return off; 2459 } 2460 2461 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2462 int off = offset(); 2463 ldrsb(dst, src); 2464 return off; 2465 } 2466 2467 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2468 int off = offset(); 2469 ldrshw(dst, src); 2470 return off; 2471 } 2472 2473 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2474 int off = offset(); 2475 ldrsbw(dst, src); 2476 return off; 2477 } 2478 2479 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2480 switch (size_in_bytes) { 2481 case 8: ldr(dst, src); break; 2482 case 4: ldrw(dst, src); break; 2483 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2484 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2485 default: ShouldNotReachHere(); 2486 } 2487 } 2488 2489 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2490 switch (size_in_bytes) { 2491 case 8: str(src, dst); break; 2492 case 4: strw(src, dst); break; 2493 case 2: strh(src, dst); break; 2494 case 1: strb(src, dst); break; 2495 default: ShouldNotReachHere(); 2496 } 2497 } 2498 2499 void MacroAssembler::decrementw(Register reg, int value) 2500 { 2501 if (value < 0) { incrementw(reg, -value); return; } 2502 if (value == 0) { return; } 2503 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2504 /* else */ { 2505 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2506 movw(rscratch2, (unsigned)value); 2507 subw(reg, reg, rscratch2); 2508 } 2509 } 2510 2511 void MacroAssembler::decrement(Register reg, int value) 2512 { 2513 if (value < 0) { increment(reg, -value); return; } 2514 if (value == 0) { return; } 2515 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2516 /* else */ { 2517 assert(reg != rscratch2, "invalid dst for register decrement"); 2518 mov(rscratch2, (uint64_t)value); 2519 sub(reg, reg, rscratch2); 2520 } 2521 } 2522 2523 void MacroAssembler::decrementw(Address dst, int value) 2524 { 2525 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2526 if (dst.getMode() == Address::literal) { 2527 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2528 lea(rscratch2, dst); 2529 dst = Address(rscratch2); 2530 } 2531 ldrw(rscratch1, dst); 2532 decrementw(rscratch1, value); 2533 strw(rscratch1, dst); 2534 } 2535 2536 void MacroAssembler::decrement(Address dst, int value) 2537 { 2538 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2539 if (dst.getMode() == Address::literal) { 2540 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2541 lea(rscratch2, dst); 2542 dst = Address(rscratch2); 2543 } 2544 ldr(rscratch1, dst); 2545 decrement(rscratch1, value); 2546 str(rscratch1, dst); 2547 } 2548 2549 void MacroAssembler::incrementw(Register reg, int value) 2550 { 2551 if (value < 0) { decrementw(reg, -value); return; } 2552 if (value == 0) { return; } 2553 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2554 /* 
else */ { 2555 assert(reg != rscratch2, "invalid dst for register increment"); 2556 movw(rscratch2, (unsigned)value); 2557 addw(reg, reg, rscratch2); 2558 } 2559 } 2560 2561 void MacroAssembler::increment(Register reg, int value) 2562 { 2563 if (value < 0) { decrement(reg, -value); return; } 2564 if (value == 0) { return; } 2565 if (value < (1 << 12)) { add(reg, reg, value); return; } 2566 /* else */ { 2567 assert(reg != rscratch2, "invalid dst for register increment"); 2568 movw(rscratch2, (unsigned)value); 2569 add(reg, reg, rscratch2); 2570 } 2571 } 2572 2573 void MacroAssembler::incrementw(Address dst, int value) 2574 { 2575 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2576 if (dst.getMode() == Address::literal) { 2577 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2578 lea(rscratch2, dst); 2579 dst = Address(rscratch2); 2580 } 2581 ldrw(rscratch1, dst); 2582 incrementw(rscratch1, value); 2583 strw(rscratch1, dst); 2584 } 2585 2586 void MacroAssembler::increment(Address dst, int value) 2587 { 2588 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2589 if (dst.getMode() == Address::literal) { 2590 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2591 lea(rscratch2, dst); 2592 dst = Address(rscratch2); 2593 } 2594 ldr(rscratch1, dst); 2595 increment(rscratch1, value); 2596 str(rscratch1, dst); 2597 } 2598 2599 // Push lots of registers in the bit set supplied. Don't push sp. 2600 // Return the number of words pushed 2601 int MacroAssembler::push(unsigned int bitset, Register stack) { 2602 int words_pushed = 0; 2603 2604 // Scan bitset to accumulate register pairs 2605 unsigned char regs[32]; 2606 int count = 0; 2607 for (int reg = 0; reg <= 30; reg++) { 2608 if (1 & bitset) 2609 regs[count++] = reg; 2610 bitset >>= 1; 2611 } 2612 regs[count++] = zr->raw_encoding(); 2613 count &= ~1; // Only push an even number of regs 2614 2615 if (count) { 2616 stp(as_Register(regs[0]), as_Register(regs[1]), 2617 Address(pre(stack, -count * wordSize))); 2618 words_pushed += 2; 2619 } 2620 for (int i = 2; i < count; i += 2) { 2621 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2622 Address(stack, i * wordSize)); 2623 words_pushed += 2; 2624 } 2625 2626 assert(words_pushed == count, "oops, pushed != count"); 2627 2628 return count; 2629 } 2630 2631 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2632 int words_pushed = 0; 2633 2634 // Scan bitset to accumulate register pairs 2635 unsigned char regs[32]; 2636 int count = 0; 2637 for (int reg = 0; reg <= 30; reg++) { 2638 if (1 & bitset) 2639 regs[count++] = reg; 2640 bitset >>= 1; 2641 } 2642 regs[count++] = zr->raw_encoding(); 2643 count &= ~1; 2644 2645 for (int i = 2; i < count; i += 2) { 2646 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2647 Address(stack, i * wordSize)); 2648 words_pushed += 2; 2649 } 2650 if (count) { 2651 ldp(as_Register(regs[0]), as_Register(regs[1]), 2652 Address(post(stack, count * wordSize))); 2653 words_pushed += 2; 2654 } 2655 2656 assert(words_pushed == count, "oops, pushed != count"); 2657 2658 return count; 2659 } 2660 2661 // Push lots of registers in the bit set supplied. Don't push sp. 
2662 // Return the number of dwords pushed 2663 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2664 int words_pushed = 0; 2665 bool use_sve = false; 2666 int sve_vector_size_in_bytes = 0; 2667 2668 #ifdef COMPILER2 2669 use_sve = Matcher::supports_scalable_vector(); 2670 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2671 #endif 2672 2673 // Scan bitset to accumulate register pairs 2674 unsigned char regs[32]; 2675 int count = 0; 2676 for (int reg = 0; reg <= 31; reg++) { 2677 if (1 & bitset) 2678 regs[count++] = reg; 2679 bitset >>= 1; 2680 } 2681 2682 if (count == 0) { 2683 return 0; 2684 } 2685 2686 if (mode == PushPopFull) { 2687 if (use_sve && sve_vector_size_in_bytes > 16) { 2688 mode = PushPopSVE; 2689 } else { 2690 mode = PushPopNeon; 2691 } 2692 } 2693 2694 #ifndef PRODUCT 2695 { 2696 char buffer[48]; 2697 if (mode == PushPopSVE) { 2698 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2699 } else if (mode == PushPopNeon) { 2700 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2701 } else { 2702 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2703 } 2704 block_comment(buffer); 2705 } 2706 #endif 2707 2708 if (mode == PushPopSVE) { 2709 sub(stack, stack, sve_vector_size_in_bytes * count); 2710 for (int i = 0; i < count; i++) { 2711 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2712 } 2713 return count * sve_vector_size_in_bytes / 8; 2714 } 2715 2716 if (mode == PushPopNeon) { 2717 if (count == 1) { 2718 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2719 return 2; 2720 } 2721 2722 bool odd = (count & 1) == 1; 2723 int push_slots = count + (odd ? 1 : 0); 2724 2725 // Always pushing full 128 bit registers. 2726 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2727 words_pushed += 2; 2728 2729 for (int i = 2; i + 1 < count; i += 2) { 2730 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2731 words_pushed += 2; 2732 } 2733 2734 if (odd) { 2735 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2736 words_pushed++; 2737 } 2738 2739 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2740 return count * 2; 2741 } 2742 2743 if (mode == PushPopFp) { 2744 bool odd = (count & 1) == 1; 2745 int push_slots = count + (odd ? 
1 : 0); 2746 2747 if (count == 1) { 2748 // Stack pointer must be 16 bytes aligned 2749 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2750 return 1; 2751 } 2752 2753 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2754 words_pushed += 2; 2755 2756 for (int i = 2; i + 1 < count; i += 2) { 2757 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2758 words_pushed += 2; 2759 } 2760 2761 if (odd) { 2762 // Stack pointer must be 16 bytes aligned 2763 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2764 words_pushed++; 2765 } 2766 2767 assert(words_pushed == count, "oops, pushed != count"); 2768 2769 return count; 2770 } 2771 2772 return 0; 2773 } 2774 2775 // Return the number of dwords popped 2776 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2777 int words_pushed = 0; 2778 bool use_sve = false; 2779 int sve_vector_size_in_bytes = 0; 2780 2781 #ifdef COMPILER2 2782 use_sve = Matcher::supports_scalable_vector(); 2783 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2784 #endif 2785 // Scan bitset to accumulate register pairs 2786 unsigned char regs[32]; 2787 int count = 0; 2788 for (int reg = 0; reg <= 31; reg++) { 2789 if (1 & bitset) 2790 regs[count++] = reg; 2791 bitset >>= 1; 2792 } 2793 2794 if (count == 0) { 2795 return 0; 2796 } 2797 2798 if (mode == PushPopFull) { 2799 if (use_sve && sve_vector_size_in_bytes > 16) { 2800 mode = PushPopSVE; 2801 } else { 2802 mode = PushPopNeon; 2803 } 2804 } 2805 2806 #ifndef PRODUCT 2807 { 2808 char buffer[48]; 2809 if (mode == PushPopSVE) { 2810 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 2811 } else if (mode == PushPopNeon) { 2812 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 2813 } else { 2814 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 2815 } 2816 block_comment(buffer); 2817 } 2818 #endif 2819 2820 if (mode == PushPopSVE) { 2821 for (int i = count - 1; i >= 0; i--) { 2822 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2823 } 2824 add(stack, stack, sve_vector_size_in_bytes * count); 2825 return count * sve_vector_size_in_bytes / 8; 2826 } 2827 2828 if (mode == PushPopNeon) { 2829 if (count == 1) { 2830 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2831 return 2; 2832 } 2833 2834 bool odd = (count & 1) == 1; 2835 int push_slots = count + (odd ? 1 : 0); 2836 2837 if (odd) { 2838 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2839 words_pushed++; 2840 } 2841 2842 for (int i = 2; i + 1 < count; i += 2) { 2843 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2844 words_pushed += 2; 2845 } 2846 2847 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2848 words_pushed += 2; 2849 2850 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2851 2852 return count * 2; 2853 } 2854 2855 if (mode == PushPopFp) { 2856 bool odd = (count & 1) == 1; 2857 int push_slots = count + (odd ? 
1 : 0); 2858 2859 if (count == 1) { 2860 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 2861 return 1; 2862 } 2863 2864 if (odd) { 2865 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2866 words_pushed++; 2867 } 2868 2869 for (int i = 2; i + 1 < count; i += 2) { 2870 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2871 words_pushed += 2; 2872 } 2873 2874 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 2875 words_pushed += 2; 2876 2877 assert(words_pushed == count, "oops, pushed != count"); 2878 2879 return count; 2880 } 2881 2882 return 0; 2883 } 2884 2885 // Return the number of dwords pushed 2886 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2887 bool use_sve = false; 2888 int sve_predicate_size_in_slots = 0; 2889 2890 #ifdef COMPILER2 2891 use_sve = Matcher::supports_scalable_vector(); 2892 if (use_sve) { 2893 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2894 } 2895 #endif 2896 2897 if (!use_sve) { 2898 return 0; 2899 } 2900 2901 unsigned char regs[PRegister::number_of_registers]; 2902 int count = 0; 2903 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2904 if (1 & bitset) 2905 regs[count++] = reg; 2906 bitset >>= 1; 2907 } 2908 2909 if (count == 0) { 2910 return 0; 2911 } 2912 2913 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2914 VMRegImpl::stack_slot_size * count, 16); 2915 sub(stack, stack, total_push_bytes); 2916 for (int i = 0; i < count; i++) { 2917 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2918 } 2919 return total_push_bytes / 8; 2920 } 2921 2922 // Return the number of dwords popped 2923 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2924 bool use_sve = false; 2925 int sve_predicate_size_in_slots = 0; 2926 2927 #ifdef COMPILER2 2928 use_sve = Matcher::supports_scalable_vector(); 2929 if (use_sve) { 2930 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2931 } 2932 #endif 2933 2934 if (!use_sve) { 2935 return 0; 2936 } 2937 2938 unsigned char regs[PRegister::number_of_registers]; 2939 int count = 0; 2940 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2941 if (1 & bitset) 2942 regs[count++] = reg; 2943 bitset >>= 1; 2944 } 2945 2946 if (count == 0) { 2947 return 0; 2948 } 2949 2950 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2951 VMRegImpl::stack_slot_size * count, 16); 2952 for (int i = count - 1; i >= 0; i--) { 2953 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2954 } 2955 add(stack, stack, total_pop_bytes); 2956 return total_pop_bytes / 8; 2957 } 2958 2959 #ifdef ASSERT 2960 void MacroAssembler::verify_heapbase(const char* msg) { 2961 #if 0 2962 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2963 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2964 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2965 // rheapbase is allocated as general register 2966 return; 2967 } 2968 if (CheckCompressedOops) { 2969 Label ok; 2970 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2971 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr())); 2972 br(Assembler::EQ, ok); 2973 stop(msg); 2974 bind(ok); 2975 pop(1 << rscratch1->encoding(), sp); 2976 } 2977 #endif 2978 } 2979 #endif 2980 2981 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 
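// value holds a jobject; its low bits tag the handle kind (local handles are
// untagged, global and weak-global handles are tagged). Dispatch on the tag,
// strip it, and load the referenced oop with the appropriate decorators.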
2982 assert_different_registers(value, tmp1, tmp2); 2983 Label done, tagged, weak_tagged; 2984 2985 cbz(value, done); // Use null as-is. 2986 tst(value, JNIHandles::tag_mask); // Test for tag. 2987 br(Assembler::NE, tagged); 2988 2989 // Resolve local handle 2990 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 2991 verify_oop(value); 2992 b(done); 2993 2994 bind(tagged); 2995 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 2996 tbnz(value, 0, weak_tagged); // Test for weak tag. 2997 2998 // Resolve global handle 2999 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3000 verify_oop(value); 3001 b(done); 3002 3003 bind(weak_tagged); 3004 // Resolve jweak. 3005 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3006 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3007 verify_oop(value); 3008 3009 bind(done); 3010 } 3011 3012 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3013 assert_different_registers(value, tmp1, tmp2); 3014 Label done; 3015 3016 cbz(value, done); // Use null as-is. 3017 3018 #ifdef ASSERT 3019 { 3020 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3021 Label valid_global_tag; 3022 tbnz(value, 1, valid_global_tag); // Test for global tag 3023 stop("non global jobject using resolve_global_jobject"); 3024 bind(valid_global_tag); 3025 } 3026 #endif 3027 3028 // Resolve global handle 3029 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3030 verify_oop(value); 3031 3032 bind(done); 3033 } 3034 3035 void MacroAssembler::stop(const char* msg) { 3036 BLOCK_COMMENT(msg); 3037 dcps1(0xdeae); 3038 emit_int64((uintptr_t)msg); 3039 } 3040 3041 void MacroAssembler::unimplemented(const char* what) { 3042 const char* buf = nullptr; 3043 { 3044 ResourceMark rm; 3045 stringStream ss; 3046 ss.print("unimplemented: %s", what); 3047 buf = code_string(ss.as_string()); 3048 } 3049 stop(buf); 3050 } 3051 3052 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3053 #ifdef ASSERT 3054 Label OK; 3055 br(cc, OK); 3056 stop(msg); 3057 bind(OK); 3058 #endif 3059 } 3060 3061 // If a constant does not fit in an immediate field, generate some 3062 // number of MOV instructions and then perform the operation. 3063 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3064 add_sub_imm_insn insn1, 3065 add_sub_reg_insn insn2, 3066 bool is32) { 3067 assert(Rd != zr, "Rd = zr and not setting flags?"); 3068 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3069 if (fits) { 3070 (this->*insn1)(Rd, Rn, imm); 3071 } else { 3072 if (uabs(imm) < (1 << 24)) { 3073 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3074 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3075 } else { 3076 assert_different_registers(Rd, Rn); 3077 mov(Rd, imm); 3078 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3079 } 3080 } 3081 } 3082 3083 // Separate vsn which sets the flags. Optimisations are more restricted 3084 // because we must set the flags correctly. 3085 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3086 add_sub_imm_insn insn1, 3087 add_sub_reg_insn insn2, 3088 bool is32) { 3089 bool fits = operand_valid_for_add_sub_immediate(is32 ? 
(int32_t)imm : imm); 3090 if (fits) { 3091 (this->*insn1)(Rd, Rn, imm); 3092 } else { 3093 assert_different_registers(Rd, Rn); 3094 assert(Rd != zr, "overflow in immediate operand"); 3095 mov(Rd, imm); 3096 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3097 } 3098 } 3099 3100 3101 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3102 if (increment.is_register()) { 3103 add(Rd, Rn, increment.as_register()); 3104 } else { 3105 add(Rd, Rn, increment.as_constant()); 3106 } 3107 } 3108 3109 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3110 if (increment.is_register()) { 3111 addw(Rd, Rn, increment.as_register()); 3112 } else { 3113 addw(Rd, Rn, increment.as_constant()); 3114 } 3115 } 3116 3117 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3118 if (decrement.is_register()) { 3119 sub(Rd, Rn, decrement.as_register()); 3120 } else { 3121 sub(Rd, Rn, decrement.as_constant()); 3122 } 3123 } 3124 3125 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3126 if (decrement.is_register()) { 3127 subw(Rd, Rn, decrement.as_register()); 3128 } else { 3129 subw(Rd, Rn, decrement.as_constant()); 3130 } 3131 } 3132 3133 void MacroAssembler::reinit_heapbase() 3134 { 3135 if (UseCompressedOops) { 3136 if (Universe::is_fully_initialized()) { 3137 mov(rheapbase, CompressedOops::base()); 3138 } else { 3139 lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3140 ldr(rheapbase, Address(rheapbase)); 3141 } 3142 } 3143 } 3144 3145 // this simulates the behaviour of the x86 cmpxchg instruction using a 3146 // load linked/store conditional pair. we use the acquire/release 3147 // versions of these instructions so that we flush pending writes as 3148 // per Java semantics. 3149 3150 // n.b the x86 version assumes the old value to be compared against is 3151 // in rax and updates rax with the value located in memory if the 3152 // cmpxchg fails. we supply a register for the old value explicitly 3153 3154 // the aarch64 load linked/store conditional instructions do not 3155 // accept an offset. so, unlike x86, we must provide a plain register 3156 // to identify the memory word to be compared/exchanged rather than a 3157 // register+offset Address. 3158 3159 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3160 Label &succeed, Label *fail) { 3161 // oldv holds comparison value 3162 // newv holds value to write in exchange 3163 // addr identifies memory word to compare against/update 3164 if (UseLSE) { 3165 mov(tmp, oldv); 3166 casal(Assembler::xword, oldv, newv, addr); 3167 cmp(tmp, oldv); 3168 br(Assembler::EQ, succeed); 3169 membar(AnyAny); 3170 } else { 3171 Label retry_load, nope; 3172 prfm(Address(addr), PSTL1STRM); 3173 bind(retry_load); 3174 // flush and load exclusive from the memory location 3175 // and fail if it is not what we expect 3176 ldaxr(tmp, addr); 3177 cmp(tmp, oldv); 3178 br(Assembler::NE, nope); 3179 // if we store+flush with no intervening write tmp will be zero 3180 stlxr(tmp, newv, addr); 3181 cbzw(tmp, succeed); 3182 // retry so we only ever return after a load fails to compare 3183 // ensures we don't return a stale value after a failed write. 
3184 b(retry_load); 3185 // if the memory word differs we return it in oldv and signal a fail 3186 bind(nope); 3187 membar(AnyAny); 3188 mov(oldv, tmp); 3189 } 3190 if (fail) 3191 b(*fail); 3192 } 3193 3194 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 3195 Label &succeed, Label *fail) { 3196 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 3197 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 3198 } 3199 3200 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 3201 Label &succeed, Label *fail) { 3202 // oldv holds comparison value 3203 // newv holds value to write in exchange 3204 // addr identifies memory word to compare against/update 3205 // tmp returns 0/1 for success/failure 3206 if (UseLSE) { 3207 mov(tmp, oldv); 3208 casal(Assembler::word, oldv, newv, addr); 3209 cmp(tmp, oldv); 3210 br(Assembler::EQ, succeed); 3211 membar(AnyAny); 3212 } else { 3213 Label retry_load, nope; 3214 prfm(Address(addr), PSTL1STRM); 3215 bind(retry_load); 3216 // flush and load exclusive from the memory location 3217 // and fail if it is not what we expect 3218 ldaxrw(tmp, addr); 3219 cmp(tmp, oldv); 3220 br(Assembler::NE, nope); 3221 // if we store+flush with no intervening write tmp will be zero 3222 stlxrw(tmp, newv, addr); 3223 cbzw(tmp, succeed); 3224 // retry so we only ever return after a load fails to compare 3225 // ensures we don't return a stale value after a failed write. 3226 b(retry_load); 3227 // if the memory word differs we return it in oldv and signal a fail 3228 bind(nope); 3229 membar(AnyAny); 3230 mov(oldv, tmp); 3231 } 3232 if (fail) 3233 b(*fail); 3234 } 3235 3236 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3237 // doesn't retry and may fail spuriously. If the oldval is wanted, 3238 // Pass a register for the result, otherwise pass noreg. 3239 3240 // Clobbers rscratch1 3241 void MacroAssembler::cmpxchg(Register addr, Register expected, 3242 Register new_val, 3243 enum operand_size size, 3244 bool acquire, bool release, 3245 bool weak, 3246 Register result) { 3247 if (result == noreg) result = rscratch1; 3248 BLOCK_COMMENT("cmpxchg {"); 3249 if (UseLSE) { 3250 mov(result, expected); 3251 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3252 compare_eq(result, expected, size); 3253 #ifdef ASSERT 3254 // Poison rscratch1 which is written on !UseLSE branch 3255 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3256 #endif 3257 } else { 3258 Label retry_load, done; 3259 prfm(Address(addr), PSTL1STRM); 3260 bind(retry_load); 3261 load_exclusive(result, addr, size, acquire); 3262 compare_eq(result, expected, size); 3263 br(Assembler::NE, done); 3264 store_exclusive(rscratch1, new_val, addr, size, release); 3265 if (weak) { 3266 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3267 } else { 3268 cbnzw(rscratch1, retry_load); 3269 } 3270 bind(done); 3271 } 3272 BLOCK_COMMENT("} cmpxchg"); 3273 } 3274 3275 // A generic comparison. Only compares for equality, clobbers rscratch1. 
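// For halfword and byte operands there is no sub-word cmp, so we XOR the
// registers into rscratch1 and test only the low 16 or 8 bits; the outcome
// is left in the flags (EQ on equality).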
3276 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3277 if (size == xword) { 3278 cmp(rm, rn); 3279 } else if (size == word) { 3280 cmpw(rm, rn); 3281 } else if (size == halfword) { 3282 eorw(rscratch1, rm, rn); 3283 ands(zr, rscratch1, 0xffff); 3284 } else if (size == byte) { 3285 eorw(rscratch1, rm, rn); 3286 ands(zr, rscratch1, 0xff); 3287 } else { 3288 ShouldNotReachHere(); 3289 } 3290 } 3291 3292 3293 static bool different(Register a, RegisterOrConstant b, Register c) { 3294 if (b.is_constant()) 3295 return a != c; 3296 else 3297 return a != b.as_register() && a != c && b.as_register() != c; 3298 } 3299 3300 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3301 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3302 if (UseLSE) { \ 3303 prev = prev->is_valid() ? prev : zr; \ 3304 if (incr.is_register()) { \ 3305 AOP(sz, incr.as_register(), prev, addr); \ 3306 } else { \ 3307 mov(rscratch2, incr.as_constant()); \ 3308 AOP(sz, rscratch2, prev, addr); \ 3309 } \ 3310 return; \ 3311 } \ 3312 Register result = rscratch2; \ 3313 if (prev->is_valid()) \ 3314 result = different(prev, incr, addr) ? prev : rscratch2; \ 3315 \ 3316 Label retry_load; \ 3317 prfm(Address(addr), PSTL1STRM); \ 3318 bind(retry_load); \ 3319 LDXR(result, addr); \ 3320 OP(rscratch1, result, incr); \ 3321 STXR(rscratch2, rscratch1, addr); \ 3322 cbnzw(rscratch2, retry_load); \ 3323 if (prev->is_valid() && prev != result) { \ 3324 IOP(prev, rscratch1, incr); \ 3325 } \ 3326 } 3327 3328 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3329 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3330 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3331 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3332 3333 #undef ATOMIC_OP 3334 3335 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3336 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3337 if (UseLSE) { \ 3338 prev = prev->is_valid() ? prev : zr; \ 3339 AOP(sz, newv, prev, addr); \ 3340 return; \ 3341 } \ 3342 Register result = rscratch2; \ 3343 if (prev->is_valid()) \ 3344 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 3345 \ 3346 Label retry_load; \ 3347 prfm(Address(addr), PSTL1STRM); \ 3348 bind(retry_load); \ 3349 LDXR(result, addr); \ 3350 STXR(rscratch1, newv, addr); \ 3351 cbnzw(rscratch1, retry_load); \ 3352 if (prev->is_valid() && prev != result) \ 3353 mov(prev, result); \ 3354 } 3355 3356 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) 3357 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) 3358 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) 3359 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) 3360 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) 3361 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) 3362 3363 #undef ATOMIC_XCHG 3364 3365 #ifndef PRODUCT 3366 extern "C" void findpc(intptr_t x); 3367 #endif 3368 3369 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 3370 { 3371 // In order to get locks to work, we need to fake a in_VM state 3372 if (ShowMessageBoxOnError ) { 3373 JavaThread* thread = JavaThread::current(); 3374 JavaThreadState saved_state = thread->thread_state(); 3375 thread->set_thread_state(_thread_in_vm); 3376 #ifndef PRODUCT 3377 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 3378 ttyLocker ttyl; 3379 BytecodeCounter::print(); 3380 } 3381 #endif 3382 if (os::message_box(msg, "Execution stopped, print registers?")) { 3383 ttyLocker ttyl; 3384 tty->print_cr(" pc = 0x%016" PRIx64, pc); 3385 #ifndef PRODUCT 3386 tty->cr(); 3387 findpc(pc); 3388 tty->cr(); 3389 #endif 3390 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]); 3391 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]); 3392 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]); 3393 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]); 3394 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]); 3395 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]); 3396 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]); 3397 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]); 3398 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]); 3399 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]); 3400 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]); 3401 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]); 3402 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]); 3403 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]); 3404 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]); 3405 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]); 3406 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]); 3407 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]); 3408 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]); 3409 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]); 3410 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]); 3411 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]); 3412 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]); 3413 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]); 3414 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]); 3415 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]); 3416 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]); 3417 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]); 3418 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]); 3419 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]); 3420 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]); 3421 BREAKPOINT; 3422 } 3423 } 3424 fatal("DEBUG MESSAGE: %s", msg); 3425 } 3426 3427 RegSet MacroAssembler::call_clobbered_gp_registers() { 3428 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2); 3429 #ifndef R18_RESERVED 3430 regs += r18_tls; 3431 #endif 3432 return regs; 3433 } 3434 3435 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { 3436 int step = 4 * 
wordSize; 3437 push(call_clobbered_gp_registers() - exclude, sp); 3438 sub(sp, sp, step); 3439 mov(rscratch1, -step); 3440 // Push v0-v7, v16-v31. 3441 for (int i = 31; i>= 4; i -= 4) { 3442 if (i <= v7->encoding() || i >= v16->encoding()) 3443 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3444 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3445 } 3446 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3447 as_FloatRegister(3), T1D, Address(sp)); 3448 } 3449 3450 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3451 for (int i = 0; i < 32; i += 4) { 3452 if (i <= v7->encoding() || i >= v16->encoding()) 3453 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3454 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3455 } 3456 3457 reinitialize_ptrue(); 3458 3459 pop(call_clobbered_gp_registers() - exclude, sp); 3460 } 3461 3462 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3463 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3464 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3465 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3466 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3467 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3468 sve_str(as_FloatRegister(i), Address(sp, i)); 3469 } 3470 } else { 3471 int step = (save_vectors ? 8 : 4) * wordSize; 3472 mov(rscratch1, -step); 3473 sub(sp, sp, step); 3474 for (int i = 28; i >= 4; i -= 4) { 3475 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3476 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3477 } 3478 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3479 } 3480 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3481 sub(sp, sp, total_predicate_in_bytes); 3482 for (int i = 0; i < PRegister::number_of_registers; i++) { 3483 sve_str(as_PRegister(i), Address(sp, i)); 3484 } 3485 } 3486 } 3487 3488 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3489 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3490 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3491 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3492 sve_ldr(as_PRegister(i), Address(sp, i)); 3493 } 3494 add(sp, sp, total_predicate_in_bytes); 3495 } 3496 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3497 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3498 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3499 } 3500 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3501 } else { 3502 int step = (restore_vectors ? 8 : 4) * wordSize; 3503 for (int i = 0; i <= 28; i += 4) 3504 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3505 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3506 } 3507 3508 // We may use predicate registers and rely on ptrue with SVE, 3509 // regardless of wide vector (> 8 bytes) used or not. 3510 if (use_sve) { 3511 reinitialize_ptrue(); 3512 } 3513 3514 // integer registers except lr & sp 3515 pop(RegSet::range(r0, r17), sp); 3516 #ifdef R18_RESERVED 3517 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3518 pop(RegSet::range(r20, r29), sp); 3519 #else 3520 pop(RegSet::range(r18_tls, r29), sp); 3521 #endif 3522 } 3523 3524 /** 3525 * Helpers for multiply_to_len(). 
3526 */ 3527 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, 3528 Register src1, Register src2) { 3529 adds(dest_lo, dest_lo, src1); 3530 adc(dest_hi, dest_hi, zr); 3531 adds(dest_lo, dest_lo, src2); 3532 adc(final_dest_hi, dest_hi, zr); 3533 } 3534 3535 // Generate an address from (r + r1 extend offset). "size" is the 3536 // size of the operand. The result may be in rscratch2. 3537 Address MacroAssembler::offsetted_address(Register r, Register r1, 3538 Address::extend ext, int offset, int size) { 3539 if (offset || (ext.shift() % size != 0)) { 3540 lea(rscratch2, Address(r, r1, ext)); 3541 return Address(rscratch2, offset); 3542 } else { 3543 return Address(r, r1, ext); 3544 } 3545 } 3546 3547 Address MacroAssembler::spill_address(int size, int offset, Register tmp) 3548 { 3549 assert(offset >= 0, "spill to negative address?"); 3550 // Offset reachable ? 3551 // Not aligned - 9 bits signed offset 3552 // Aligned - 12 bits unsigned offset shifted 3553 Register base = sp; 3554 if ((offset & (size-1)) && offset >= (1<<8)) { 3555 add(tmp, base, offset & ((1<<12)-1)); 3556 base = tmp; 3557 offset &= -1u<<12; 3558 } 3559 3560 if (offset >= (1<<12) * size) { 3561 add(tmp, base, offset & (((1<<12)-1)<<12)); 3562 base = tmp; 3563 offset &= ~(((1<<12)-1)<<12); 3564 } 3565 3566 return Address(base, offset); 3567 } 3568 3569 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) { 3570 assert(offset >= 0, "spill to negative address?"); 3571 3572 Register base = sp; 3573 3574 // An immediate offset in the range 0 to 255 which is multiplied 3575 // by the current vector or predicate register size in bytes. 3576 if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) { 3577 return Address(base, offset / sve_reg_size_in_bytes); 3578 } 3579 3580 add(tmp, base, offset); 3581 return Address(tmp); 3582 } 3583 3584 // Checks whether offset is aligned. 3585 // Returns true if it is, else false. 3586 bool MacroAssembler::merge_alignment_check(Register base, 3587 size_t size, 3588 int64_t cur_offset, 3589 int64_t prev_offset) const { 3590 if (AvoidUnalignedAccesses) { 3591 if (base == sp) { 3592 // Checks whether low offset if aligned to pair of registers. 3593 int64_t pair_mask = size * 2 - 1; 3594 int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset; 3595 return (offset & pair_mask) == 0; 3596 } else { // If base is not sp, we can't guarantee the access is aligned. 3597 return false; 3598 } 3599 } else { 3600 int64_t mask = size - 1; 3601 // Load/store pair instruction only supports element size aligned offset. 3602 return (cur_offset & mask) == 0 && (prev_offset & mask) == 0; 3603 } 3604 } 3605 3606 // Checks whether current and previous loads/stores can be merged. 3607 // Returns true if it can be merged, else false. 
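// Roughly, a mergeable pair looks like
//
//   ldr/str rt_prev, [base, #off]
//   ldr/str rt_cur,  [base, #off +/- size]   // same base, size and kind
//
// with the lower of the two offsets encodable as an ldp/stp immediate
// (a signed multiple of the access size in -64*size .. 63*size) and,
// when AvoidUnalignedAccesses is set, aligned for the paired access.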
3608 bool MacroAssembler::ldst_can_merge(Register rt, 3609 const Address &adr, 3610 size_t cur_size_in_bytes, 3611 bool is_store) const { 3612 address prev = pc() - NativeInstruction::instruction_size; 3613 address last = code()->last_insn(); 3614 3615 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) { 3616 return false; 3617 } 3618 3619 if (adr.getMode() != Address::base_plus_offset || prev != last) { 3620 return false; 3621 } 3622 3623 NativeLdSt* prev_ldst = NativeLdSt_at(prev); 3624 size_t prev_size_in_bytes = prev_ldst->size_in_bytes(); 3625 3626 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging."); 3627 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging."); 3628 3629 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) { 3630 return false; 3631 } 3632 3633 int64_t max_offset = 63 * prev_size_in_bytes; 3634 int64_t min_offset = -64 * prev_size_in_bytes; 3635 3636 assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged."); 3637 3638 // Only same base can be merged. 3639 if (adr.base() != prev_ldst->base()) { 3640 return false; 3641 } 3642 3643 int64_t cur_offset = adr.offset(); 3644 int64_t prev_offset = prev_ldst->offset(); 3645 size_t diff = abs(cur_offset - prev_offset); 3646 if (diff != prev_size_in_bytes) { 3647 return false; 3648 } 3649 3650 // Following cases can not be merged: 3651 // ldr x2, [x2, #8] 3652 // ldr x3, [x2, #16] 3653 // or: 3654 // ldr x2, [x3, #8] 3655 // ldr x2, [x3, #16] 3656 // If t1 and t2 is the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL. 3657 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) { 3658 return false; 3659 } 3660 3661 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset; 3662 // Offset range must be in ldp/stp instruction's range. 3663 if (low_offset > max_offset || low_offset < min_offset) { 3664 return false; 3665 } 3666 3667 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) { 3668 return true; 3669 } 3670 3671 return false; 3672 } 3673 3674 // Merge current load/store with previous load/store into ldp/stp. 3675 void MacroAssembler::merge_ldst(Register rt, 3676 const Address &adr, 3677 size_t cur_size_in_bytes, 3678 bool is_store) { 3679 3680 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged."); 3681 3682 Register rt_low, rt_high; 3683 address prev = pc() - NativeInstruction::instruction_size; 3684 NativeLdSt* prev_ldst = NativeLdSt_at(prev); 3685 3686 int64_t offset; 3687 3688 if (adr.offset() < prev_ldst->offset()) { 3689 offset = adr.offset(); 3690 rt_low = rt; 3691 rt_high = prev_ldst->target(); 3692 } else { 3693 offset = prev_ldst->offset(); 3694 rt_low = prev_ldst->target(); 3695 rt_high = rt; 3696 } 3697 3698 Address adr_p = Address(prev_ldst->base(), offset); 3699 // Overwrite previous generated binary. 
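  // Rewinding the code section drops the previously emitted ldr/str, and a
  // single paired access is emitted in its place at the lower offset, e.g.
  //   ldr x1, [sp, #16] ; ldr x2, [sp, #24]   ==>   ldp x1, x2, [sp, #16]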
3700 code_section()->set_end(prev); 3701 3702 const size_t sz = prev_ldst->size_in_bytes(); 3703 assert(sz == 8 || sz == 4, "only supports 64/32bit merging."); 3704 if (!is_store) { 3705 BLOCK_COMMENT("merged ldr pair"); 3706 if (sz == 8) { 3707 ldp(rt_low, rt_high, adr_p); 3708 } else { 3709 ldpw(rt_low, rt_high, adr_p); 3710 } 3711 } else { 3712 BLOCK_COMMENT("merged str pair"); 3713 if (sz == 8) { 3714 stp(rt_low, rt_high, adr_p); 3715 } else { 3716 stpw(rt_low, rt_high, adr_p); 3717 } 3718 } 3719 } 3720 3721 /** 3722 * Multiply 64 bit by 64 bit first loop. 3723 */ 3724 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 3725 Register y, Register y_idx, Register z, 3726 Register carry, Register product, 3727 Register idx, Register kdx) { 3728 // 3729 // jlong carry, x[], y[], z[]; 3730 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 3731 // huge_128 product = y[idx] * x[xstart] + carry; 3732 // z[kdx] = (jlong)product; 3733 // carry = (jlong)(product >>> 64); 3734 // } 3735 // z[xstart] = carry; 3736 // 3737 3738 Label L_first_loop, L_first_loop_exit; 3739 Label L_one_x, L_one_y, L_multiply; 3740 3741 subsw(xstart, xstart, 1); 3742 br(Assembler::MI, L_one_x); 3743 3744 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt))); 3745 ldr(x_xstart, Address(rscratch1)); 3746 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian 3747 3748 bind(L_first_loop); 3749 subsw(idx, idx, 1); 3750 br(Assembler::MI, L_first_loop_exit); 3751 subsw(idx, idx, 1); 3752 br(Assembler::MI, L_one_y); 3753 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3754 ldr(y_idx, Address(rscratch1)); 3755 ror(y_idx, y_idx, 32); // convert big-endian to little-endian 3756 bind(L_multiply); 3757 3758 // AArch64 has a multiply-accumulate instruction that we can't use 3759 // here because it has no way to process carries, so we have to use 3760 // separate add and adc instructions. Bah. 3761 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product 3762 mul(product, x_xstart, y_idx); 3763 adds(product, product, carry); 3764 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product 3765 3766 subw(kdx, kdx, 2); 3767 ror(product, product, 32); // back to big-endian 3768 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong)); 3769 3770 b(L_first_loop); 3771 3772 bind(L_one_y); 3773 ldrw(y_idx, Address(y, 0)); 3774 b(L_multiply); 3775 3776 bind(L_one_x); 3777 ldrw(x_xstart, Address(x, 0)); 3778 b(L_first_loop); 3779 3780 bind(L_first_loop_exit); 3781 } 3782 3783 /** 3784 * Multiply 128 bit by 128 bit. Unrolled inner loop.
3785 * 3786 */ 3787 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3788 Register carry, Register carry2, 3789 Register idx, Register jdx, 3790 Register yz_idx1, Register yz_idx2, 3791 Register tmp, Register tmp3, Register tmp4, 3792 Register tmp6, Register product_hi) { 3793 3794 // jlong carry, x[], y[], z[]; 3795 // int kdx = ystart+1; 3796 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3797 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3798 // jlong carry2 = (jlong)(tmp3 >>> 64); 3799 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3800 // carry = (jlong)(tmp4 >>> 64); 3801 // z[kdx+idx+1] = (jlong)tmp3; 3802 // z[kdx+idx] = (jlong)tmp4; 3803 // } 3804 // idx += 2; 3805 // if (idx > 0) { 3806 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3807 // z[kdx+idx] = (jlong)yz_idx1; 3808 // carry = (jlong)(yz_idx1 >>> 64); 3809 // } 3810 // 3811 3812 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3813 3814 lsrw(jdx, idx, 2); 3815 3816 bind(L_third_loop); 3817 3818 subsw(jdx, jdx, 1); 3819 br(Assembler::MI, L_third_loop_exit); 3820 subw(idx, idx, 4); 3821 3822 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3823 3824 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3825 3826 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3827 3828 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3829 ror(yz_idx2, yz_idx2, 32); 3830 3831 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3832 3833 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3834 umulh(tmp4, product_hi, yz_idx1); 3835 3836 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3837 ror(rscratch2, rscratch2, 32); 3838 3839 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3840 umulh(carry2, product_hi, yz_idx2); 3841 3842 // propagate sum of both multiplications into carry:tmp4:tmp3 3843 adds(tmp3, tmp3, carry); 3844 adc(tmp4, tmp4, zr); 3845 adds(tmp3, tmp3, rscratch1); 3846 adcs(tmp4, tmp4, tmp); 3847 adc(carry, carry2, zr); 3848 adds(tmp4, tmp4, rscratch2); 3849 adc(carry, carry, zr); 3850 3851 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3852 ror(tmp4, tmp4, 32); 3853 stp(tmp4, tmp3, Address(tmp6, 0)); 3854 3855 b(L_third_loop); 3856 bind (L_third_loop_exit); 3857 3858 andw (idx, idx, 0x3); 3859 cbz(idx, L_post_third_loop_done); 3860 3861 Label L_check_1; 3862 subsw(idx, idx, 2); 3863 br(Assembler::MI, L_check_1); 3864 3865 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3866 ldr(yz_idx1, Address(rscratch1, 0)); 3867 ror(yz_idx1, yz_idx1, 32); 3868 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3869 umulh(tmp4, product_hi, yz_idx1); 3870 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3871 ldr(yz_idx2, Address(rscratch1, 0)); 3872 ror(yz_idx2, yz_idx2, 32); 3873 3874 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3875 3876 ror(tmp3, tmp3, 32); 3877 str(tmp3, Address(rscratch1, 0)); 3878 3879 bind (L_check_1); 3880 3881 andw (idx, idx, 0x1); 3882 subsw(idx, idx, 1); 3883 br(Assembler::MI, L_post_third_loop_done); 3884 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3885 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3886 umulh(carry2, tmp4, product_hi); 3887 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3888 3889 add2_with_carry(carry2, tmp3, tmp4, carry); 3890 3891 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3892 
extr(carry, carry2, tmp3, 32); 3893 3894 bind(L_post_third_loop_done); 3895 } 3896 3897 /** 3898 * Code for BigInteger::multiplyToLen() intrinsic. 3899 * 3900 * r0: x 3901 * r1: xlen 3902 * r2: y 3903 * r3: ylen 3904 * r4: z 3905 * r5: tmp0 3906 * r10: tmp1 3907 * r11: tmp2 3908 * r12: tmp3 3909 * r13: tmp4 3910 * r14: tmp5 3911 * r15: tmp6 3912 * r16: tmp7 3913 * 3914 */ 3915 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 3916 Register z, Register tmp0, 3917 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 3918 Register tmp5, Register tmp6, Register product_hi) { 3919 3920 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi); 3921 3922 const Register idx = tmp1; 3923 const Register kdx = tmp2; 3924 const Register xstart = tmp3; 3925 3926 const Register y_idx = tmp4; 3927 const Register carry = tmp5; 3928 const Register product = xlen; 3929 const Register x_xstart = tmp0; 3930 3931 // First Loop. 3932 // 3933 // final static long LONG_MASK = 0xffffffffL; 3934 // int xstart = xlen - 1; 3935 // int ystart = ylen - 1; 3936 // long carry = 0; 3937 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 3938 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 3939 // z[kdx] = (int)product; 3940 // carry = product >>> 32; 3941 // } 3942 // z[xstart] = (int)carry; 3943 // 3944 3945 movw(idx, ylen); // idx = ylen; 3946 addw(kdx, xlen, ylen); // kdx = xlen+ylen; 3947 mov(carry, zr); // carry = 0; 3948 3949 Label L_done; 3950 3951 movw(xstart, xlen); 3952 subsw(xstart, xstart, 1); 3953 br(Assembler::MI, L_done); 3954 3955 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 3956 3957 Label L_second_loop; 3958 cbzw(kdx, L_second_loop); 3959 3960 Label L_carry; 3961 subw(kdx, kdx, 1); 3962 cbzw(kdx, L_carry); 3963 3964 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3965 lsr(carry, carry, 32); 3966 subw(kdx, kdx, 1); 3967 3968 bind(L_carry); 3969 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3970 3971 // Second and third (nested) loops.
3972 // 3973 // for (int i = xstart-1; i >= 0; i--) { // Second loop 3974 // carry = 0; 3975 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 3976 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 3977 // (z[k] & LONG_MASK) + carry; 3978 // z[k] = (int)product; 3979 // carry = product >>> 32; 3980 // } 3981 // z[i] = (int)carry; 3982 // } 3983 // 3984 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 3985 3986 const Register jdx = tmp1; 3987 3988 bind(L_second_loop); 3989 mov(carry, zr); // carry = 0; 3990 movw(jdx, ylen); // j = ystart+1 3991 3992 subsw(xstart, xstart, 1); // i = xstart-1; 3993 br(Assembler::MI, L_done); 3994 3995 str(z, Address(pre(sp, -4 * wordSize))); 3996 3997 Label L_last_x; 3998 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 3999 subsw(xstart, xstart, 1); // i = xstart-1; 4000 br(Assembler::MI, L_last_x); 4001 4002 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4003 ldr(product_hi, Address(rscratch1)); 4004 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4005 4006 Label L_third_loop_prologue; 4007 bind(L_third_loop_prologue); 4008 4009 str(ylen, Address(sp, wordSize)); 4010 stp(x, xstart, Address(sp, 2 * wordSize)); 4011 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4012 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4013 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4014 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4015 4016 addw(tmp3, xlen, 1); 4017 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4018 subsw(tmp3, tmp3, 1); 4019 br(Assembler::MI, L_done); 4020 4021 lsr(carry, carry, 32); 4022 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4023 b(L_second_loop); 4024 4025 // Next infrequent code is moved outside loops. 4026 bind(L_last_x); 4027 ldrw(product_hi, Address(x, 0)); 4028 b(L_third_loop_prologue); 4029 4030 bind(L_done); 4031 } 4032 4033 // Code for BigInteger::mulAdd intrinsic 4034 // out = r0 4035 // in = r1 4036 // offset = r2 (already out.length-offset) 4037 // len = r3 4038 // k = r4 4039 // 4040 // pseudo code from java implementation: 4041 // carry = 0; 4042 // offset = out.length-offset - 1; 4043 // for (int j=len-1; j >= 0; j--) { 4044 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4045 // out[offset--] = (int)product; 4046 // carry = product >>> 32; 4047 // } 4048 // return (int)carry; 4049 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4050 Register len, Register k) { 4051 Label LOOP, END; 4052 // pre-loop 4053 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4054 csel(out, zr, out, Assembler::EQ); 4055 br(Assembler::EQ, END); 4056 add(in, in, len, LSL, 2); // in[j+1] address 4057 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4058 mov(out, zr); // used to keep carry now 4059 BIND(LOOP); 4060 ldrw(rscratch1, Address(pre(in, -4))); 4061 madd(rscratch1, rscratch1, k, out); 4062 ldrw(rscratch2, Address(pre(offset, -4))); 4063 add(rscratch1, rscratch1, rscratch2); 4064 strw(rscratch1, Address(offset)); 4065 lsr(out, rscratch1, 32); 4066 subs(len, len, 1); 4067 br(Assembler::NE, LOOP); 4068 BIND(END); 4069 } 4070 4071 /** 4072 * Emits code to update CRC-32 with a byte value according to constants in table 4073 * 4074 * @param [in,out]crc Register containing the crc. 
4075 * @param [in]val Register containing the byte to fold into the CRC. 4076 * @param [in]table Register containing the table of crc constants. 4077 * 4078 * uint32_t crc; 4079 * val = crc_table[(val ^ crc) & 0xFF]; 4080 * crc = val ^ (crc >> 8); 4081 * 4082 */ 4083 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4084 eor(val, val, crc); 4085 andr(val, val, 0xff); 4086 ldrw(val, Address(table, val, Address::lsl(2))); 4087 eor(crc, val, crc, Assembler::LSR, 8); 4088 } 4089 4090 /** 4091 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4092 * 4093 * @param [in,out]crc Register containing the crc. 4094 * @param [in]v Register containing the 32-bit to fold into the CRC. 4095 * @param [in]table0 Register containing table 0 of crc constants. 4096 * @param [in]table1 Register containing table 1 of crc constants. 4097 * @param [in]table2 Register containing table 2 of crc constants. 4098 * @param [in]table3 Register containing table 3 of crc constants. 4099 * 4100 * uint32_t crc; 4101 * v = crc ^ v 4102 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4103 * 4104 */ 4105 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4106 Register table0, Register table1, Register table2, Register table3, 4107 bool upper) { 4108 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4109 uxtb(tmp, v); 4110 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4111 ubfx(tmp, v, 8, 8); 4112 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4113 eor(crc, crc, tmp); 4114 ubfx(tmp, v, 16, 8); 4115 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4116 eor(crc, crc, tmp); 4117 ubfx(tmp, v, 24, 8); 4118 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4119 eor(crc, crc, tmp); 4120 } 4121 4122 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4123 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4124 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4125 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4126 4127 subs(tmp0, len, 384); 4128 mvnw(crc, crc); 4129 br(Assembler::GE, CRC_by128_pre); 4130 BIND(CRC_less128); 4131 subs(len, len, 32); 4132 br(Assembler::GE, CRC_by32_loop); 4133 BIND(CRC_less32); 4134 adds(len, len, 32 - 4); 4135 br(Assembler::GE, CRC_by4_loop); 4136 adds(len, len, 4); 4137 br(Assembler::GT, CRC_by1_loop); 4138 b(L_exit); 4139 4140 BIND(CRC_by32_loop); 4141 ldp(tmp0, tmp1, Address(buf)); 4142 crc32x(crc, crc, tmp0); 4143 ldp(tmp2, tmp3, Address(buf, 16)); 4144 crc32x(crc, crc, tmp1); 4145 add(buf, buf, 32); 4146 crc32x(crc, crc, tmp2); 4147 subs(len, len, 32); 4148 crc32x(crc, crc, tmp3); 4149 br(Assembler::GE, CRC_by32_loop); 4150 cmn(len, (u1)32); 4151 br(Assembler::NE, CRC_less32); 4152 b(L_exit); 4153 4154 BIND(CRC_by4_loop); 4155 ldrw(tmp0, Address(post(buf, 4))); 4156 subs(len, len, 4); 4157 crc32w(crc, crc, tmp0); 4158 br(Assembler::GE, CRC_by4_loop); 4159 adds(len, len, 4); 4160 br(Assembler::LE, L_exit); 4161 BIND(CRC_by1_loop); 4162 ldrb(tmp0, Address(post(buf, 1))); 4163 subs(len, len, 1); 4164 crc32b(crc, crc, tmp0); 4165 br(Assembler::GT, CRC_by1_loop); 4166 b(L_exit); 4167 4168 BIND(CRC_by128_pre); 4169 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4170 4*256*sizeof(juint) + 8*sizeof(juint)); 4171 mov(crc, 0); 4172 crc32x(crc, crc, tmp0); 4173 crc32x(crc, crc, tmp1); 4174 4175 cbnz(len, CRC_less128); 4176 4177 BIND(L_exit); 
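  // CRC-32 operates on the bit-inverted value (see the mvnw at entry), so
  // invert once more here to hand the conventional result back.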
4178 mvnw(crc, crc); 4179 } 4180 4181 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4182 Register len, Register tmp0, Register tmp1, Register tmp2, 4183 Register tmp3) { 4184 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4185 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4186 4187 mvnw(crc, crc); 4188 4189 subs(len, len, 128); 4190 br(Assembler::GE, CRC_by64_pre); 4191 BIND(CRC_less64); 4192 adds(len, len, 128-32); 4193 br(Assembler::GE, CRC_by32_loop); 4194 BIND(CRC_less32); 4195 adds(len, len, 32-4); 4196 br(Assembler::GE, CRC_by4_loop); 4197 adds(len, len, 4); 4198 br(Assembler::GT, CRC_by1_loop); 4199 b(L_exit); 4200 4201 BIND(CRC_by32_loop); 4202 ldp(tmp0, tmp1, Address(post(buf, 16))); 4203 subs(len, len, 32); 4204 crc32x(crc, crc, tmp0); 4205 ldr(tmp2, Address(post(buf, 8))); 4206 crc32x(crc, crc, tmp1); 4207 ldr(tmp3, Address(post(buf, 8))); 4208 crc32x(crc, crc, tmp2); 4209 crc32x(crc, crc, tmp3); 4210 br(Assembler::GE, CRC_by32_loop); 4211 cmn(len, (u1)32); 4212 br(Assembler::NE, CRC_less32); 4213 b(L_exit); 4214 4215 BIND(CRC_by4_loop); 4216 ldrw(tmp0, Address(post(buf, 4))); 4217 subs(len, len, 4); 4218 crc32w(crc, crc, tmp0); 4219 br(Assembler::GE, CRC_by4_loop); 4220 adds(len, len, 4); 4221 br(Assembler::LE, L_exit); 4222 BIND(CRC_by1_loop); 4223 ldrb(tmp0, Address(post(buf, 1))); 4224 subs(len, len, 1); 4225 crc32b(crc, crc, tmp0); 4226 br(Assembler::GT, CRC_by1_loop); 4227 b(L_exit); 4228 4229 BIND(CRC_by64_pre); 4230 sub(buf, buf, 8); 4231 ldp(tmp0, tmp1, Address(buf, 8)); 4232 crc32x(crc, crc, tmp0); 4233 ldr(tmp2, Address(buf, 24)); 4234 crc32x(crc, crc, tmp1); 4235 ldr(tmp3, Address(buf, 32)); 4236 crc32x(crc, crc, tmp2); 4237 ldr(tmp0, Address(buf, 40)); 4238 crc32x(crc, crc, tmp3); 4239 ldr(tmp1, Address(buf, 48)); 4240 crc32x(crc, crc, tmp0); 4241 ldr(tmp2, Address(buf, 56)); 4242 crc32x(crc, crc, tmp1); 4243 ldr(tmp3, Address(pre(buf, 64))); 4244 4245 b(CRC_by64_loop); 4246 4247 align(CodeEntryAlignment); 4248 BIND(CRC_by64_loop); 4249 subs(len, len, 64); 4250 crc32x(crc, crc, tmp2); 4251 ldr(tmp0, Address(buf, 8)); 4252 crc32x(crc, crc, tmp3); 4253 ldr(tmp1, Address(buf, 16)); 4254 crc32x(crc, crc, tmp0); 4255 ldr(tmp2, Address(buf, 24)); 4256 crc32x(crc, crc, tmp1); 4257 ldr(tmp3, Address(buf, 32)); 4258 crc32x(crc, crc, tmp2); 4259 ldr(tmp0, Address(buf, 40)); 4260 crc32x(crc, crc, tmp3); 4261 ldr(tmp1, Address(buf, 48)); 4262 crc32x(crc, crc, tmp0); 4263 ldr(tmp2, Address(buf, 56)); 4264 crc32x(crc, crc, tmp1); 4265 ldr(tmp3, Address(pre(buf, 64))); 4266 br(Assembler::GE, CRC_by64_loop); 4267 4268 // post-loop 4269 crc32x(crc, crc, tmp2); 4270 crc32x(crc, crc, tmp3); 4271 4272 sub(len, len, 64); 4273 add(buf, buf, 8); 4274 cmn(len, (u1)128); 4275 br(Assembler::NE, CRC_less64); 4276 BIND(L_exit); 4277 mvnw(crc, crc); 4278 } 4279 4280 /** 4281 * @param crc register containing existing CRC (32-bit) 4282 * @param buf register pointing to input byte buffer (byte*) 4283 * @param len register containing number of bytes 4284 * @param table register that will contain address of CRC table 4285 * @param tmp scratch register 4286 */ 4287 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4288 Register table0, Register table1, Register table2, Register table3, 4289 Register tmp, Register tmp2, Register tmp3) { 4290 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4291 4292 if (UseCryptoPmullForCRC32) { 4293 
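    // The table registers are only repurposed here as the scratch registers
    // (tmp0..tmp3) that the pmull-based kernel expects; the fold constants
    // it needs are located from StubRoutines::crc_table_addr() internally.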
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4294 return; 4295 } 4296 4297 if (UseCRC32) { 4298 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4299 return; 4300 } 4301 4302 mvnw(crc, crc); 4303 4304 { 4305 uint64_t offset; 4306 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4307 add(table0, table0, offset); 4308 } 4309 add(table1, table0, 1*256*sizeof(juint)); 4310 add(table2, table0, 2*256*sizeof(juint)); 4311 add(table3, table0, 3*256*sizeof(juint)); 4312 4313 { // Neon code start 4314 cmp(len, (u1)64); 4315 br(Assembler::LT, L_by16); 4316 eor(v16, T16B, v16, v16); 4317 4318 Label L_fold; 4319 4320 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4321 4322 ld1(v0, v1, T2D, post(buf, 32)); 4323 ld1r(v4, T2D, post(tmp, 8)); 4324 ld1r(v5, T2D, post(tmp, 8)); 4325 ld1r(v6, T2D, post(tmp, 8)); 4326 ld1r(v7, T2D, post(tmp, 8)); 4327 mov(v16, S, 0, crc); 4328 4329 eor(v0, T16B, v0, v16); 4330 sub(len, len, 64); 4331 4332 BIND(L_fold); 4333 pmull(v22, T8H, v0, v5, T8B); 4334 pmull(v20, T8H, v0, v7, T8B); 4335 pmull(v23, T8H, v0, v4, T8B); 4336 pmull(v21, T8H, v0, v6, T8B); 4337 4338 pmull2(v18, T8H, v0, v5, T16B); 4339 pmull2(v16, T8H, v0, v7, T16B); 4340 pmull2(v19, T8H, v0, v4, T16B); 4341 pmull2(v17, T8H, v0, v6, T16B); 4342 4343 uzp1(v24, T8H, v20, v22); 4344 uzp2(v25, T8H, v20, v22); 4345 eor(v20, T16B, v24, v25); 4346 4347 uzp1(v26, T8H, v16, v18); 4348 uzp2(v27, T8H, v16, v18); 4349 eor(v16, T16B, v26, v27); 4350 4351 ushll2(v22, T4S, v20, T8H, 8); 4352 ushll(v20, T4S, v20, T4H, 8); 4353 4354 ushll2(v18, T4S, v16, T8H, 8); 4355 ushll(v16, T4S, v16, T4H, 8); 4356 4357 eor(v22, T16B, v23, v22); 4358 eor(v18, T16B, v19, v18); 4359 eor(v20, T16B, v21, v20); 4360 eor(v16, T16B, v17, v16); 4361 4362 uzp1(v17, T2D, v16, v20); 4363 uzp2(v21, T2D, v16, v20); 4364 eor(v17, T16B, v17, v21); 4365 4366 ushll2(v20, T2D, v17, T4S, 16); 4367 ushll(v16, T2D, v17, T2S, 16); 4368 4369 eor(v20, T16B, v20, v22); 4370 eor(v16, T16B, v16, v18); 4371 4372 uzp1(v17, T2D, v20, v16); 4373 uzp2(v21, T2D, v20, v16); 4374 eor(v28, T16B, v17, v21); 4375 4376 pmull(v22, T8H, v1, v5, T8B); 4377 pmull(v20, T8H, v1, v7, T8B); 4378 pmull(v23, T8H, v1, v4, T8B); 4379 pmull(v21, T8H, v1, v6, T8B); 4380 4381 pmull2(v18, T8H, v1, v5, T16B); 4382 pmull2(v16, T8H, v1, v7, T16B); 4383 pmull2(v19, T8H, v1, v4, T16B); 4384 pmull2(v17, T8H, v1, v6, T16B); 4385 4386 ld1(v0, v1, T2D, post(buf, 32)); 4387 4388 uzp1(v24, T8H, v20, v22); 4389 uzp2(v25, T8H, v20, v22); 4390 eor(v20, T16B, v24, v25); 4391 4392 uzp1(v26, T8H, v16, v18); 4393 uzp2(v27, T8H, v16, v18); 4394 eor(v16, T16B, v26, v27); 4395 4396 ushll2(v22, T4S, v20, T8H, 8); 4397 ushll(v20, T4S, v20, T4H, 8); 4398 4399 ushll2(v18, T4S, v16, T8H, 8); 4400 ushll(v16, T4S, v16, T4H, 8); 4401 4402 eor(v22, T16B, v23, v22); 4403 eor(v18, T16B, v19, v18); 4404 eor(v20, T16B, v21, v20); 4405 eor(v16, T16B, v17, v16); 4406 4407 uzp1(v17, T2D, v16, v20); 4408 uzp2(v21, T2D, v16, v20); 4409 eor(v16, T16B, v17, v21); 4410 4411 ushll2(v20, T2D, v16, T4S, 16); 4412 ushll(v16, T2D, v16, T2S, 16); 4413 4414 eor(v20, T16B, v22, v20); 4415 eor(v16, T16B, v16, v18); 4416 4417 uzp1(v17, T2D, v20, v16); 4418 uzp2(v21, T2D, v20, v16); 4419 eor(v20, T16B, v17, v21); 4420 4421 shl(v16, T2D, v28, 1); 4422 shl(v17, T2D, v20, 1); 4423 4424 eor(v0, T16B, v0, v16); 4425 eor(v1, T16B, v1, v17); 4426 4427 subs(len, len, 32); 4428 br(Assembler::GE, L_fold); 4429 4430 mov(crc, 0); 4431 mov(tmp, v0, D, 0); 
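    // Fold the Neon accumulators back into the scalar CRC: each 64-bit lane
    // of v0/v1 goes through the table-driven word update, low half first
    // (upper == false), then the high half (upper == true).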
4432 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4433 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4434 mov(tmp, v0, D, 1); 4435 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4436 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4437 mov(tmp, v1, D, 0); 4438 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4439 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4440 mov(tmp, v1, D, 1); 4441 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4442 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4443 4444 add(len, len, 32); 4445 } // Neon code end 4446 4447 BIND(L_by16); 4448 subs(len, len, 16); 4449 br(Assembler::GE, L_by16_loop); 4450 adds(len, len, 16-4); 4451 br(Assembler::GE, L_by4_loop); 4452 adds(len, len, 4); 4453 br(Assembler::GT, L_by1_loop); 4454 b(L_exit); 4455 4456 BIND(L_by4_loop); 4457 ldrw(tmp, Address(post(buf, 4))); 4458 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4459 subs(len, len, 4); 4460 br(Assembler::GE, L_by4_loop); 4461 adds(len, len, 4); 4462 br(Assembler::LE, L_exit); 4463 BIND(L_by1_loop); 4464 subs(len, len, 1); 4465 ldrb(tmp, Address(post(buf, 1))); 4466 update_byte_crc32(crc, tmp, table0); 4467 br(Assembler::GT, L_by1_loop); 4468 b(L_exit); 4469 4470 align(CodeEntryAlignment); 4471 BIND(L_by16_loop); 4472 subs(len, len, 16); 4473 ldp(tmp, tmp3, Address(post(buf, 16))); 4474 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4475 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4476 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4477 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4478 br(Assembler::GE, L_by16_loop); 4479 adds(len, len, 16-4); 4480 br(Assembler::GE, L_by4_loop); 4481 adds(len, len, 4); 4482 br(Assembler::GT, L_by1_loop); 4483 BIND(L_exit); 4484 mvnw(crc, crc); 4485 } 4486 4487 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4488 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4489 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4490 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4491 4492 subs(tmp0, len, 384); 4493 br(Assembler::GE, CRC_by128_pre); 4494 BIND(CRC_less128); 4495 subs(len, len, 32); 4496 br(Assembler::GE, CRC_by32_loop); 4497 BIND(CRC_less32); 4498 adds(len, len, 32 - 4); 4499 br(Assembler::GE, CRC_by4_loop); 4500 adds(len, len, 4); 4501 br(Assembler::GT, CRC_by1_loop); 4502 b(L_exit); 4503 4504 BIND(CRC_by32_loop); 4505 ldp(tmp0, tmp1, Address(buf)); 4506 crc32cx(crc, crc, tmp0); 4507 ldr(tmp2, Address(buf, 16)); 4508 crc32cx(crc, crc, tmp1); 4509 ldr(tmp3, Address(buf, 24)); 4510 crc32cx(crc, crc, tmp2); 4511 add(buf, buf, 32); 4512 subs(len, len, 32); 4513 crc32cx(crc, crc, tmp3); 4514 br(Assembler::GE, CRC_by32_loop); 4515 cmn(len, (u1)32); 4516 br(Assembler::NE, CRC_less32); 4517 b(L_exit); 4518 4519 BIND(CRC_by4_loop); 4520 ldrw(tmp0, Address(post(buf, 4))); 4521 subs(len, len, 4); 4522 crc32cw(crc, crc, tmp0); 4523 br(Assembler::GE, CRC_by4_loop); 4524 adds(len, len, 4); 4525 br(Assembler::LE, L_exit); 4526 BIND(CRC_by1_loop); 4527 ldrb(tmp0, Address(post(buf, 1))); 4528 subs(len, len, 1); 4529 crc32cb(crc, crc, tmp0); 4530 br(Assembler::GT, CRC_by1_loop); 4531 b(L_exit); 4532 
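  // Bulk path: fold 128 bytes per iteration with pmull against the CRC32C
  // variant of the fold constants, then finish any remainder (< 128 bytes)
  // through the scalar crc32c paths above.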
4533 BIND(CRC_by128_pre); 4534 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4535 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4536 mov(crc, 0); 4537 crc32cx(crc, crc, tmp0); 4538 crc32cx(crc, crc, tmp1); 4539 4540 cbnz(len, CRC_less128); 4541 4542 BIND(L_exit); 4543 } 4544 4545 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4546 Register len, Register tmp0, Register tmp1, Register tmp2, 4547 Register tmp3) { 4548 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4549 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4550 4551 subs(len, len, 128); 4552 br(Assembler::GE, CRC_by64_pre); 4553 BIND(CRC_less64); 4554 adds(len, len, 128-32); 4555 br(Assembler::GE, CRC_by32_loop); 4556 BIND(CRC_less32); 4557 adds(len, len, 32-4); 4558 br(Assembler::GE, CRC_by4_loop); 4559 adds(len, len, 4); 4560 br(Assembler::GT, CRC_by1_loop); 4561 b(L_exit); 4562 4563 BIND(CRC_by32_loop); 4564 ldp(tmp0, tmp1, Address(post(buf, 16))); 4565 subs(len, len, 32); 4566 crc32cx(crc, crc, tmp0); 4567 ldr(tmp2, Address(post(buf, 8))); 4568 crc32cx(crc, crc, tmp1); 4569 ldr(tmp3, Address(post(buf, 8))); 4570 crc32cx(crc, crc, tmp2); 4571 crc32cx(crc, crc, tmp3); 4572 br(Assembler::GE, CRC_by32_loop); 4573 cmn(len, (u1)32); 4574 br(Assembler::NE, CRC_less32); 4575 b(L_exit); 4576 4577 BIND(CRC_by4_loop); 4578 ldrw(tmp0, Address(post(buf, 4))); 4579 subs(len, len, 4); 4580 crc32cw(crc, crc, tmp0); 4581 br(Assembler::GE, CRC_by4_loop); 4582 adds(len, len, 4); 4583 br(Assembler::LE, L_exit); 4584 BIND(CRC_by1_loop); 4585 ldrb(tmp0, Address(post(buf, 1))); 4586 subs(len, len, 1); 4587 crc32cb(crc, crc, tmp0); 4588 br(Assembler::GT, CRC_by1_loop); 4589 b(L_exit); 4590 4591 BIND(CRC_by64_pre); 4592 sub(buf, buf, 8); 4593 ldp(tmp0, tmp1, Address(buf, 8)); 4594 crc32cx(crc, crc, tmp0); 4595 ldr(tmp2, Address(buf, 24)); 4596 crc32cx(crc, crc, tmp1); 4597 ldr(tmp3, Address(buf, 32)); 4598 crc32cx(crc, crc, tmp2); 4599 ldr(tmp0, Address(buf, 40)); 4600 crc32cx(crc, crc, tmp3); 4601 ldr(tmp1, Address(buf, 48)); 4602 crc32cx(crc, crc, tmp0); 4603 ldr(tmp2, Address(buf, 56)); 4604 crc32cx(crc, crc, tmp1); 4605 ldr(tmp3, Address(pre(buf, 64))); 4606 4607 b(CRC_by64_loop); 4608 4609 align(CodeEntryAlignment); 4610 BIND(CRC_by64_loop); 4611 subs(len, len, 64); 4612 crc32cx(crc, crc, tmp2); 4613 ldr(tmp0, Address(buf, 8)); 4614 crc32cx(crc, crc, tmp3); 4615 ldr(tmp1, Address(buf, 16)); 4616 crc32cx(crc, crc, tmp0); 4617 ldr(tmp2, Address(buf, 24)); 4618 crc32cx(crc, crc, tmp1); 4619 ldr(tmp3, Address(buf, 32)); 4620 crc32cx(crc, crc, tmp2); 4621 ldr(tmp0, Address(buf, 40)); 4622 crc32cx(crc, crc, tmp3); 4623 ldr(tmp1, Address(buf, 48)); 4624 crc32cx(crc, crc, tmp0); 4625 ldr(tmp2, Address(buf, 56)); 4626 crc32cx(crc, crc, tmp1); 4627 ldr(tmp3, Address(pre(buf, 64))); 4628 br(Assembler::GE, CRC_by64_loop); 4629 4630 // post-loop 4631 crc32cx(crc, crc, tmp2); 4632 crc32cx(crc, crc, tmp3); 4633 4634 sub(len, len, 64); 4635 add(buf, buf, 8); 4636 cmn(len, (u1)128); 4637 br(Assembler::NE, CRC_less64); 4638 BIND(L_exit); 4639 } 4640 4641 /** 4642 * @param crc register containing existing CRC (32-bit) 4643 * @param buf register pointing to input byte buffer (byte*) 4644 * @param len register containing number of bytes 4645 * @param table register that will contain address of CRC table 4646 * @param tmp scratch register 4647 */ 4648 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4649 Register table0, Register table1, Register table2, Register table3, 4650 Register tmp, Register tmp2, Register tmp3) { 4651 if (UseCryptoPmullForCRC32) { 4652 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4653 } else { 4654 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4655 } 4656 } 4657 4658 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4659 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4660 Label CRC_by128_loop; 4661 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4662 4663 sub(len, len, 256); 4664 Register table = tmp0; 4665 { 4666 uint64_t offset; 4667 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4668 add(table, table, offset); 4669 } 4670 add(table, table, table_offset); 4671 4672 // Registers v0..v7 are used as data registers. 4673 // Registers v16..v31 are used as tmp registers. 4674 sub(buf, buf, 0x10); 4675 ldrq(v0, Address(buf, 0x10)); 4676 ldrq(v1, Address(buf, 0x20)); 4677 ldrq(v2, Address(buf, 0x30)); 4678 ldrq(v3, Address(buf, 0x40)); 4679 ldrq(v4, Address(buf, 0x50)); 4680 ldrq(v5, Address(buf, 0x60)); 4681 ldrq(v6, Address(buf, 0x70)); 4682 ldrq(v7, Address(pre(buf, 0x80))); 4683 4684 movi(v31, T4S, 0); 4685 mov(v31, S, 0, crc); 4686 eor(v0, T16B, v0, v31); 4687 4688 // Register v16 contains constants from the crc table. 4689 ldrq(v16, Address(table)); 4690 b(CRC_by128_loop); 4691 4692 align(OptoLoopAlignment); 4693 BIND(CRC_by128_loop); 4694 pmull (v17, T1Q, v0, v16, T1D); 4695 pmull2(v18, T1Q, v0, v16, T2D); 4696 ldrq(v0, Address(buf, 0x10)); 4697 eor3(v0, T16B, v17, v18, v0); 4698 4699 pmull (v19, T1Q, v1, v16, T1D); 4700 pmull2(v20, T1Q, v1, v16, T2D); 4701 ldrq(v1, Address(buf, 0x20)); 4702 eor3(v1, T16B, v19, v20, v1); 4703 4704 pmull (v21, T1Q, v2, v16, T1D); 4705 pmull2(v22, T1Q, v2, v16, T2D); 4706 ldrq(v2, Address(buf, 0x30)); 4707 eor3(v2, T16B, v21, v22, v2); 4708 4709 pmull (v23, T1Q, v3, v16, T1D); 4710 pmull2(v24, T1Q, v3, v16, T2D); 4711 ldrq(v3, Address(buf, 0x40)); 4712 eor3(v3, T16B, v23, v24, v3); 4713 4714 pmull (v25, T1Q, v4, v16, T1D); 4715 pmull2(v26, T1Q, v4, v16, T2D); 4716 ldrq(v4, Address(buf, 0x50)); 4717 eor3(v4, T16B, v25, v26, v4); 4718 4719 pmull (v27, T1Q, v5, v16, T1D); 4720 pmull2(v28, T1Q, v5, v16, T2D); 4721 ldrq(v5, Address(buf, 0x60)); 4722 eor3(v5, T16B, v27, v28, v5); 4723 4724 pmull (v29, T1Q, v6, v16, T1D); 4725 pmull2(v30, T1Q, v6, v16, T2D); 4726 ldrq(v6, Address(buf, 0x70)); 4727 eor3(v6, T16B, v29, v30, v6); 4728 4729 // Reuse registers v23, v24. 4730 // Using them won't block the first instruction of the next iteration. 4731 pmull (v23, T1Q, v7, v16, T1D); 4732 pmull2(v24, T1Q, v7, v16, T2D); 4733 ldrq(v7, Address(pre(buf, 0x80))); 4734 eor3(v7, T16B, v23, v24, v7); 4735 4736 subs(len, len, 0x80); 4737 br(Assembler::GE, CRC_by128_loop); 4738 4739 // fold into 512 bits 4740 // Use v31 for constants because v16 can be still in use. 
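  // Reduction cascade: the eight 128-bit accumulators v0..v7 are folded down
  // to v0..v3 using the constants at table + 0x10, then to a single 128-bit
  // value using the constants at +0x20 .. +0x40; its two 64-bit halves are
  // returned to the caller in tmp0/tmp1.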
4741 ldrq(v31, Address(table, 0x10)); 4742 4743 pmull (v17, T1Q, v0, v31, T1D); 4744 pmull2(v18, T1Q, v0, v31, T2D); 4745 eor3(v0, T16B, v17, v18, v4); 4746 4747 pmull (v19, T1Q, v1, v31, T1D); 4748 pmull2(v20, T1Q, v1, v31, T2D); 4749 eor3(v1, T16B, v19, v20, v5); 4750 4751 pmull (v21, T1Q, v2, v31, T1D); 4752 pmull2(v22, T1Q, v2, v31, T2D); 4753 eor3(v2, T16B, v21, v22, v6); 4754 4755 pmull (v23, T1Q, v3, v31, T1D); 4756 pmull2(v24, T1Q, v3, v31, T2D); 4757 eor3(v3, T16B, v23, v24, v7); 4758 4759 // fold into 128 bits 4760 // Use v17 for constants because v31 can be still in use. 4761 ldrq(v17, Address(table, 0x20)); 4762 pmull (v25, T1Q, v0, v17, T1D); 4763 pmull2(v26, T1Q, v0, v17, T2D); 4764 eor3(v3, T16B, v3, v25, v26); 4765 4766 // Use v18 for constants because v17 can be still in use. 4767 ldrq(v18, Address(table, 0x30)); 4768 pmull (v27, T1Q, v1, v18, T1D); 4769 pmull2(v28, T1Q, v1, v18, T2D); 4770 eor3(v3, T16B, v3, v27, v28); 4771 4772 // Use v19 for constants because v18 can be still in use. 4773 ldrq(v19, Address(table, 0x40)); 4774 pmull (v29, T1Q, v2, v19, T1D); 4775 pmull2(v30, T1Q, v2, v19, T2D); 4776 eor3(v0, T16B, v3, v29, v30); 4777 4778 add(len, len, 0x80); 4779 add(buf, buf, 0x10); 4780 4781 mov(tmp0, v0, D, 0); 4782 mov(tmp1, v0, D, 1); 4783 } 4784 4785 SkipIfEqual::SkipIfEqual( 4786 MacroAssembler* masm, const bool* flag_addr, bool value) { 4787 _masm = masm; 4788 uint64_t offset; 4789 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4790 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4791 if (value) { 4792 _masm->cbnzw(rscratch1, _label); 4793 } else { 4794 _masm->cbzw(rscratch1, _label); 4795 } 4796 } 4797 4798 SkipIfEqual::~SkipIfEqual() { 4799 _masm->bind(_label); 4800 } 4801 4802 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4803 Address adr; 4804 switch(dst.getMode()) { 4805 case Address::base_plus_offset: 4806 // This is the expected mode, although we allow all the other 4807 // forms below. 
4808 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4809 break; 4810 default: 4811 lea(rscratch2, dst); 4812 adr = Address(rscratch2); 4813 break; 4814 } 4815 ldr(rscratch1, adr); 4816 add(rscratch1, rscratch1, src); 4817 str(rscratch1, adr); 4818 } 4819 4820 void MacroAssembler::cmpptr(Register src1, Address src2) { 4821 uint64_t offset; 4822 adrp(rscratch1, src2, offset); 4823 ldr(rscratch1, Address(rscratch1, offset)); 4824 cmp(src1, rscratch1); 4825 } 4826 4827 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4828 cmp(obj1, obj2); 4829 } 4830 4831 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4832 load_method_holder(rresult, rmethod); 4833 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4834 } 4835 4836 void MacroAssembler::load_method_holder(Register holder, Register method) { 4837 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4838 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4839 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4840 } 4841 4842 void MacroAssembler::load_klass(Register dst, Register src) { 4843 if (UseCompressedClassPointers) { 4844 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4845 decode_klass_not_null(dst); 4846 } else { 4847 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4848 } 4849 } 4850 4851 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 4852 if (RestoreMXCSROnJNICalls) { 4853 Label OK; 4854 get_fpcr(tmp1); 4855 mov(tmp2, tmp1); 4856 // Set FPCR to the state we need. We do want Round to Nearest. We 4857 // don't want non-IEEE rounding modes or floating-point traps. 4858 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 4859 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 4860 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 4861 eor(tmp2, tmp1, tmp2); 4862 cbz(tmp2, OK); // Only reset FPCR if it's wrong 4863 set_fpcr(tmp1); 4864 bind(OK); 4865 } 4866 } 4867 4868 // ((OopHandle)result).resolve(); 4869 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4870 // OopHandle::resolve is an indirection. 4871 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4872 } 4873 4874 // ((WeakHandle)result).resolve(); 4875 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4876 assert_different_registers(result, tmp1, tmp2); 4877 Label resolved; 4878 4879 // A null weak handle resolves to null. 4880 cbz(result, resolved); 4881 4882 // Only 64 bit platforms support GCs that require a tmp register 4883 // WeakHandle::resolve is an indirection like jweak. 
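  // Broadly, the phantom strength tells the selected BarrierSetAssembler that
  // this read must not keep the referent alive, so a collector that has
  // already cleared the weak handle may legitimately return null here.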
4884 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4885 result, Address(result), tmp1, tmp2); 4886 bind(resolved); 4887 } 4888 4889 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) { 4890 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 4891 ldr(dst, Address(rmethod, Method::const_offset())); 4892 ldr(dst, Address(dst, ConstMethod::constants_offset())); 4893 ldr(dst, Address(dst, ConstantPool::pool_holder_offset())); 4894 ldr(dst, Address(dst, mirror_offset)); 4895 resolve_oop_handle(dst, tmp1, tmp2); 4896 } 4897 4898 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { 4899 if (UseCompressedClassPointers) { 4900 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4901 if (CompressedKlassPointers::base() == nullptr) { 4902 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); 4903 return; 4904 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 4905 && CompressedKlassPointers::shift() == 0) { 4906 // Only the bottom 32 bits matter 4907 cmpw(trial_klass, tmp); 4908 return; 4909 } 4910 decode_klass_not_null(tmp); 4911 } else { 4912 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4913 } 4914 cmp(trial_klass, tmp); 4915 } 4916 4917 void MacroAssembler::store_klass(Register dst, Register src) { 4918 // FIXME: Should this be a store release? concurrent gcs assumes 4919 // klass length is valid if klass field is not null. 4920 if (UseCompressedClassPointers) { 4921 encode_klass_not_null(src); 4922 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4923 } else { 4924 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4925 } 4926 } 4927 4928 void MacroAssembler::store_klass_gap(Register dst, Register src) { 4929 if (UseCompressedClassPointers) { 4930 // Store to klass gap in destination 4931 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 4932 } 4933 } 4934 4935 // Algorithm must match CompressedOops::encode. 4936 void MacroAssembler::encode_heap_oop(Register d, Register s) { 4937 #ifdef ASSERT 4938 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 4939 #endif 4940 verify_oop_msg(s, "broken oop in encode_heap_oop"); 4941 if (CompressedOops::base() == nullptr) { 4942 if (CompressedOops::shift() != 0) { 4943 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4944 lsr(d, s, LogMinObjAlignmentInBytes); 4945 } else { 4946 mov(d, s); 4947 } 4948 } else { 4949 subs(d, s, rheapbase); 4950 csel(d, d, zr, Assembler::HS); 4951 lsr(d, d, LogMinObjAlignmentInBytes); 4952 4953 /* Old algorithm: is this any worse? 
4954 Label nonnull; 4955 cbnz(r, nonnull); 4956 sub(r, r, rheapbase); 4957 bind(nonnull); 4958 lsr(r, r, LogMinObjAlignmentInBytes); 4959 */ 4960 } 4961 } 4962 4963 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4964 #ifdef ASSERT 4965 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4966 if (CheckCompressedOops) { 4967 Label ok; 4968 cbnz(r, ok); 4969 stop("null oop passed to encode_heap_oop_not_null"); 4970 bind(ok); 4971 } 4972 #endif 4973 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 4974 if (CompressedOops::base() != nullptr) { 4975 sub(r, r, rheapbase); 4976 } 4977 if (CompressedOops::shift() != 0) { 4978 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4979 lsr(r, r, LogMinObjAlignmentInBytes); 4980 } 4981 } 4982 4983 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 4984 #ifdef ASSERT 4985 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 4986 if (CheckCompressedOops) { 4987 Label ok; 4988 cbnz(src, ok); 4989 stop("null oop passed to encode_heap_oop_not_null2"); 4990 bind(ok); 4991 } 4992 #endif 4993 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 4994 4995 Register data = src; 4996 if (CompressedOops::base() != nullptr) { 4997 sub(dst, src, rheapbase); 4998 data = dst; 4999 } 5000 if (CompressedOops::shift() != 0) { 5001 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5002 lsr(dst, data, LogMinObjAlignmentInBytes); 5003 data = dst; 5004 } 5005 if (data == src) 5006 mov(dst, src); 5007 } 5008 5009 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5010 #ifdef ASSERT 5011 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5012 #endif 5013 if (CompressedOops::base() == nullptr) { 5014 if (CompressedOops::shift() != 0 || d != s) { 5015 lsl(d, s, CompressedOops::shift()); 5016 } 5017 } else { 5018 Label done; 5019 if (d != s) 5020 mov(d, s); 5021 cbz(s, done); 5022 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5023 bind(done); 5024 } 5025 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5026 } 5027 5028 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5029 assert (UseCompressedOops, "should only be used for compressed headers"); 5030 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5031 // Cannot assert, unverified entry point counts instructions (see .ad file) 5032 // vtableStubs also counts instructions in pd_code_size_limit. 5033 // Also do not verify_oop as this is called by verify_oop. 5034 if (CompressedOops::shift() != 0) { 5035 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5036 if (CompressedOops::base() != nullptr) { 5037 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5038 } else { 5039 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5040 } 5041 } else { 5042 assert (CompressedOops::base() == nullptr, "sanity"); 5043 } 5044 } 5045 5046 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5047 assert (UseCompressedOops, "should only be used for compressed headers"); 5048 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5049 // Cannot assert, unverified entry point counts instructions (see .ad file) 5050 // vtableStubs also counts instructions in pd_code_size_limit. 5051 // Also do not verify_oop as this is called by verify_oop. 
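  // Roughly, the emitted code computes
  //   dst = (CompressedOops::base() == nullptr) ? (src << shift)
  //                                             : rheapbase + (src << shift)
  // The caller guarantees src is a non-null narrow oop, so unlike
  // decode_heap_oop there is no need to branch around the add for null.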
5052 if (CompressedOops::shift() != 0) { 5053 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5054 if (CompressedOops::base() != nullptr) { 5055 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5056 } else { 5057 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5058 } 5059 } else { 5060 assert (CompressedOops::base() == nullptr, "sanity"); 5061 if (dst != src) { 5062 mov(dst, src); 5063 } 5064 } 5065 } 5066 5067 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5068 5069 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5070 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5071 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5072 5073 if (_klass_decode_mode != KlassDecodeNone) { 5074 return _klass_decode_mode; 5075 } 5076 5077 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 5078 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 5079 5080 if (CompressedKlassPointers::base() == nullptr) { 5081 return (_klass_decode_mode = KlassDecodeZero); 5082 } 5083 5084 if (operand_valid_for_logical_immediate( 5085 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 5086 const size_t range = CompressedKlassPointers::klass_range_end() - CompressedKlassPointers::base(); 5087 const uint64_t range_mask = (1ULL << log2i(range)) - 1; 5088 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 5089 return (_klass_decode_mode = KlassDecodeXor); 5090 } 5091 } 5092 5093 const uint64_t shifted_base = 5094 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5095 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 5096 "compressed class base bad alignment"); 5097 5098 return (_klass_decode_mode = KlassDecodeMovk); 5099 } 5100 5101 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5102 switch (klass_decode_mode()) { 5103 case KlassDecodeZero: 5104 if (CompressedKlassPointers::shift() != 0) { 5105 lsr(dst, src, LogKlassAlignmentInBytes); 5106 } else { 5107 if (dst != src) mov(dst, src); 5108 } 5109 break; 5110 5111 case KlassDecodeXor: 5112 if (CompressedKlassPointers::shift() != 0) { 5113 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5114 lsr(dst, dst, LogKlassAlignmentInBytes); 5115 } else { 5116 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5117 } 5118 break; 5119 5120 case KlassDecodeMovk: 5121 if (CompressedKlassPointers::shift() != 0) { 5122 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 5123 } else { 5124 movw(dst, src); 5125 } 5126 break; 5127 5128 case KlassDecodeNone: 5129 ShouldNotReachHere(); 5130 break; 5131 } 5132 } 5133 5134 void MacroAssembler::encode_klass_not_null(Register r) { 5135 encode_klass_not_null(r, r); 5136 } 5137 5138 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5139 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5140 5141 switch (klass_decode_mode()) { 5142 case KlassDecodeZero: 5143 if (CompressedKlassPointers::shift() != 0) { 5144 lsl(dst, src, LogKlassAlignmentInBytes); 5145 } else { 5146 if (dst != src) mov(dst, src); 5147 } 5148 break; 5149 5150 case KlassDecodeXor: 5151 if (CompressedKlassPointers::shift() != 0) { 5152 lsl(dst, src, LogKlassAlignmentInBytes); 5153 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5154 } else { 5155 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5156 } 5157 break; 5158 5159 case 
KlassDecodeMovk: { 5160 const uint64_t shifted_base = 5161 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5162 5163 if (dst != src) movw(dst, src); 5164 movk(dst, shifted_base >> 32, 32); 5165 5166 if (CompressedKlassPointers::shift() != 0) { 5167 lsl(dst, dst, LogKlassAlignmentInBytes); 5168 } 5169 5170 break; 5171 } 5172 5173 case KlassDecodeNone: 5174 ShouldNotReachHere(); 5175 break; 5176 } 5177 } 5178 5179 void MacroAssembler::decode_klass_not_null(Register r) { 5180 decode_klass_not_null(r, r); 5181 } 5182 5183 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5184 #ifdef ASSERT 5185 { 5186 ThreadInVMfromUnknown tiv; 5187 assert (UseCompressedOops, "should only be used for compressed oops"); 5188 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5189 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5190 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5191 } 5192 #endif 5193 int oop_index = oop_recorder()->find_index(obj); 5194 InstructionMark im(this); 5195 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5196 code_section()->relocate(inst_mark(), rspec); 5197 movz(dst, 0xDEAD, 16); 5198 movk(dst, 0xBEEF); 5199 } 5200 5201 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5202 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5203 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5204 int index = oop_recorder()->find_index(k); 5205 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5206 5207 InstructionMark im(this); 5208 RelocationHolder rspec = metadata_Relocation::spec(index); 5209 code_section()->relocate(inst_mark(), rspec); 5210 narrowKlass nk = CompressedKlassPointers::encode(k); 5211 movz(dst, (nk >> 16), 16); 5212 movk(dst, nk & 0xffff); 5213 } 5214 5215 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5216 Register dst, Address src, 5217 Register tmp1, Register tmp2) { 5218 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5219 decorators = AccessInternal::decorator_fixup(decorators, type); 5220 bool as_raw = (decorators & AS_RAW) != 0; 5221 if (as_raw) { 5222 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5223 } else { 5224 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5225 } 5226 } 5227 5228 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5229 Address dst, Register val, 5230 Register tmp1, Register tmp2, Register tmp3) { 5231 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5232 decorators = AccessInternal::decorator_fixup(decorators, type); 5233 bool as_raw = (decorators & AS_RAW) != 0; 5234 if (as_raw) { 5235 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5236 } else { 5237 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5238 } 5239 } 5240 5241 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5242 Register tmp2, DecoratorSet decorators) { 5243 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5244 } 5245 5246 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5247 Register tmp2, DecoratorSet decorators) { 5248 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5249 } 5250 5251 void MacroAssembler::store_heap_oop(Address dst, 
Register val, Register tmp1, 5252 Register tmp2, Register tmp3, DecoratorSet decorators) { 5253 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5254 } 5255 5256 // Used for storing nulls. 5257 void MacroAssembler::store_heap_oop_null(Address dst) { 5258 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5259 } 5260 5261 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5262 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5263 int index = oop_recorder()->allocate_metadata_index(obj); 5264 RelocationHolder rspec = metadata_Relocation::spec(index); 5265 return Address((address)obj, rspec); 5266 } 5267 5268 // Move an oop into a register. 5269 void MacroAssembler::movoop(Register dst, jobject obj) { 5270 int oop_index; 5271 if (obj == nullptr) { 5272 oop_index = oop_recorder()->allocate_oop_index(obj); 5273 } else { 5274 #ifdef ASSERT 5275 { 5276 ThreadInVMfromUnknown tiv; 5277 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5278 } 5279 #endif 5280 oop_index = oop_recorder()->find_index(obj); 5281 } 5282 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5283 5284 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5285 mov(dst, Address((address)obj, rspec)); 5286 } else { 5287 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5288 ldr_constant(dst, Address(dummy, rspec)); 5289 } 5290 5291 } 5292 5293 // Move a metadata address into a register. 5294 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5295 int oop_index; 5296 if (obj == nullptr) { 5297 oop_index = oop_recorder()->allocate_metadata_index(obj); 5298 } else { 5299 oop_index = oop_recorder()->find_index(obj); 5300 } 5301 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5302 mov(dst, Address((address)obj, rspec)); 5303 } 5304 5305 Address MacroAssembler::constant_oop_address(jobject obj) { 5306 #ifdef ASSERT 5307 { 5308 ThreadInVMfromUnknown tiv; 5309 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5310 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5311 } 5312 #endif 5313 int oop_index = oop_recorder()->find_index(obj); 5314 return Address((address)obj, oop_Relocation::spec(oop_index)); 5315 } 5316 5317 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
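// The actual allocation code is delegated to the GC's BarrierSetAssembler
// below; as a rough, illustrative sketch it performs the usual bump-pointer
// TLAB fast path:
//   obj     = thread->tlab().top();
//   new_top = obj + size;                           // size in bytes
//   if (new_top > thread->tlab().end()) goto slow_case;
//   thread->tlab().set_top(new_top);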
5318 void MacroAssembler::tlab_allocate(Register obj, 5319 Register var_size_in_bytes, 5320 int con_size_in_bytes, 5321 Register t1, 5322 Register t2, 5323 Label& slow_case) { 5324 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5325 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5326 } 5327 5328 void MacroAssembler::verify_tlab() { 5329 #ifdef ASSERT 5330 if (UseTLAB && VerifyOops) { 5331 Label next, ok; 5332 5333 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5334 5335 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5336 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5337 cmp(rscratch2, rscratch1); 5338 br(Assembler::HS, next); 5339 STOP("assert(top >= start)"); 5340 should_not_reach_here(); 5341 5342 bind(next); 5343 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5344 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5345 cmp(rscratch2, rscratch1); 5346 br(Assembler::HS, ok); 5347 STOP("assert(top <= end)"); 5348 should_not_reach_here(); 5349 5350 bind(ok); 5351 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5352 } 5353 #endif 5354 } 5355 5356 // Writes to successive stack pages until the given offset is reached, to check 5357 // for stack overflow + shadow pages. This clobbers tmp. 5358 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5359 assert_different_registers(tmp, size, rscratch1); 5360 mov(tmp, sp); 5361 // Bang stack for total size given plus shadow page size. 5362 // Bang one page at a time because large size can bang beyond yellow and 5363 // red zones. 5364 Label loop; 5365 mov(rscratch1, (int)os::vm_page_size()); 5366 bind(loop); 5367 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5368 subsw(size, size, rscratch1); 5369 str(size, Address(tmp)); 5370 br(Assembler::GT, loop); 5371 5372 // Bang down shadow pages too. 5373 // At this point, (tmp-0) is the last address touched, so don't 5374 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5375 // was post-decremented.) Skip this address by starting at i=1, and 5376 // touch a few more pages below. N.B. It is important to touch all 5377 // the way down to and including i=StackShadowPages. 5378 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 5379 // this could be any sized move, but this can be a debugging crumb 5380 // so the bigger the better. 5381 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5382 str(size, Address(tmp)); 5383 } 5384 } 5385 5386 // Move the address of the polling page into dest. 5387 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5388 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5389 } 5390 5391 // Read the polling page. The address of the polling page must 5392 // already be in r.
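// Note (roughly): get_polling_page() above loads the per-thread polling word;
// when the VM arms a safepoint or handshake, that word is switched to a
// protected page, so the dummy load emitted below faults and compiled code is
// diverted to the safepoint handler.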
5393 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5394 address mark; 5395 { 5396 InstructionMark im(this); 5397 code_section()->relocate(inst_mark(), rtype); 5398 ldrw(zr, Address(r, 0)); 5399 mark = inst_mark(); 5400 } 5401 verify_cross_modify_fence_not_required(); 5402 return mark; 5403 } 5404 5405 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5406 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5407 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5408 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5409 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5410 int64_t offset_low = dest_page - low_page; 5411 int64_t offset_high = dest_page - high_page; 5412 5413 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5414 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5415 5416 InstructionMark im(this); 5417 code_section()->relocate(inst_mark(), dest.rspec()); 5418 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5419 // the code cache so that if it is relocated we know it will still reach 5420 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5421 _adrp(reg1, dest.target()); 5422 } else { 5423 uint64_t target = (uint64_t)dest.target(); 5424 uint64_t adrp_target 5425 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5426 5427 _adrp(reg1, (address)adrp_target); 5428 movk(reg1, target >> 32, 32); 5429 } 5430 byte_offset = (uint64_t)dest.target() & 0xfff; 5431 } 5432 5433 void MacroAssembler::load_byte_map_base(Register reg) { 5434 CardTable::CardValue* byte_map_base = 5435 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5436 5437 // Strictly speaking the byte_map_base isn't an address at all, and it might 5438 // even be negative. It is thus materialised as a constant. 
5439 mov(reg, (uint64_t)byte_map_base); 5440 } 5441 5442 void MacroAssembler::build_frame(int framesize) { 5443 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5444 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5445 protect_return_address(); 5446 if (framesize < ((1 << 9) + 2 * wordSize)) { 5447 sub(sp, sp, framesize); 5448 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5449 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 5450 } else { 5451 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 5452 if (PreserveFramePointer) mov(rfp, sp); 5453 if (framesize < ((1 << 12) + 2 * wordSize)) 5454 sub(sp, sp, framesize - 2 * wordSize); 5455 else { 5456 mov(rscratch1, framesize - 2 * wordSize); 5457 sub(sp, sp, rscratch1); 5458 } 5459 } 5460 verify_cross_modify_fence_not_required(); 5461 } 5462 5463 void MacroAssembler::remove_frame(int framesize) { 5464 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5465 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5466 if (framesize < ((1 << 9) + 2 * wordSize)) { 5467 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5468 add(sp, sp, framesize); 5469 } else { 5470 if (framesize < ((1 << 12) + 2 * wordSize)) 5471 add(sp, sp, framesize - 2 * wordSize); 5472 else { 5473 mov(rscratch1, framesize - 2 * wordSize); 5474 add(sp, sp, rscratch1); 5475 } 5476 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 5477 } 5478 authenticate_return_address(); 5479 } 5480 5481 5482 // This method counts leading positive bytes (highest bit not set) in provided byte array 5483 address MacroAssembler::count_positives(Register ary1, Register len, Register result) { 5484 // Simple and most common case of aligned small array which is not at the 5485 // end of memory page is placed here. All other cases are in stub. 5486 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE; 5487 const uint64_t UPPER_BIT_MASK=0x8080808080808080; 5488 assert_different_registers(ary1, len, result); 5489 5490 mov(result, len); 5491 cmpw(len, 0); 5492 br(LE, DONE); 5493 cmpw(len, 4 * wordSize); 5494 br(GE, STUB_LONG); // size > 32 then go to stub 5495 5496 int shift = 64 - exact_log2(os::vm_page_size()); 5497 lsl(rscratch1, ary1, shift); 5498 mov(rscratch2, (size_t)(4 * wordSize) << shift); 5499 adds(rscratch2, rscratch1, rscratch2); // At end of page? 
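  // Illustrative note: the lsl above moves ary1's offset-within-page into the
  // top bits, so the adds sets the carry flag exactly when reading
  // 4 * wordSize bytes starting at ary1 would cross a page boundary, in which
  // case we fall back to the stub.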
5500 br(CS, STUB); // at the end of page then go to stub 5501 subs(len, len, wordSize); 5502 br(LT, END); 5503 5504 BIND(LOOP); 5505 ldr(rscratch1, Address(post(ary1, wordSize))); 5506 tst(rscratch1, UPPER_BIT_MASK); 5507 br(NE, SET_RESULT); 5508 subs(len, len, wordSize); 5509 br(GE, LOOP); 5510 cmpw(len, -wordSize); 5511 br(EQ, DONE); 5512 5513 BIND(END); 5514 ldr(rscratch1, Address(ary1)); 5515 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5516 lslv(rscratch1, rscratch1, rscratch2); 5517 tst(rscratch1, UPPER_BIT_MASK); 5518 br(NE, SET_RESULT); 5519 b(DONE); 5520 5521 BIND(STUB); 5522 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5523 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5524 address tpc1 = trampoline_call(count_pos); 5525 if (tpc1 == nullptr) { 5526 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5527 postcond(pc() == badAddress); 5528 return nullptr; 5529 } 5530 b(DONE); 5531 5532 BIND(STUB_LONG); 5533 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5534 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5535 address tpc2 = trampoline_call(count_pos_long); 5536 if (tpc2 == nullptr) { 5537 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5538 postcond(pc() == badAddress); 5539 return nullptr; 5540 } 5541 b(DONE); 5542 5543 BIND(SET_RESULT); 5544 5545 add(len, len, wordSize); 5546 sub(result, result, len); 5547 5548 BIND(DONE); 5549 postcond(pc() != badAddress); 5550 return pc(); 5551 } 5552 5553 // Clobbers: rscratch1, rscratch2, rflags 5554 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5555 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5556 Register tmp4, Register tmp5, Register result, 5557 Register cnt1, int elem_size) { 5558 Label DONE, SAME; 5559 Register tmp1 = rscratch1; 5560 Register tmp2 = rscratch2; 5561 int elem_per_word = wordSize/elem_size; 5562 int log_elem_size = exact_log2(elem_size); 5563 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5564 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5565 int base_offset 5566 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5567 // When the length offset is not aligned to 8 bytes, 5568 // then we align it down. This is valid because the new 5569 // offset will always be the klass which is the same 5570 // for type arrays. 5571 int start_offset = align_down(length_offset, BytesPerWord); 5572 int extra_length = base_offset - start_offset; 5573 assert(start_offset == length_offset || start_offset == klass_offset, 5574 "start offset must be 8-byte-aligned or be the klass offset"); 5575 assert(base_offset != start_offset, "must include the length field"); 5576 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5577 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5578 5579 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5580 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5581 5582 #ifndef PRODUCT 5583 { 5584 const char kind = (elem_size == 2) ? 'U' : 'L'; 5585 char comment[64]; 5586 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5587 BLOCK_COMMENT(comment); 5588 } 5589 #endif 5590 5591 // if (a1 == a2) 5592 // return true; 5593 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5594 br(EQ, SAME); 5595 5596 if (UseSimpleArrayEquals) { 5597 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5598 // if (a1 == nullptr || a2 == nullptr) 5599 // return false; 5600 // a1 & a2 == 0 means (some-pointer is null) or 5601 // (very-rare-or-even-probably-impossible-pointer-values) 5602 // so, we can save one branch in most cases 5603 tst(a1, a2); 5604 mov(result, false); 5605 br(EQ, A_MIGHT_BE_NULL); 5606 // if (a1.length != a2.length) 5607 // return false; 5608 bind(A_IS_NOT_NULL); 5609 ldrw(cnt1, Address(a1, length_offset)); 5610 // Increase loop counter by diff between base- and actual start-offset. 5611 addw(cnt1, cnt1, extra_length); 5612 lea(a1, Address(a1, start_offset)); 5613 lea(a2, Address(a2, start_offset)); 5614 // Check for short strings, i.e. smaller than wordSize. 5615 subs(cnt1, cnt1, elem_per_word); 5616 br(Assembler::LT, SHORT); 5617 // Main 8 byte comparison loop. 5618 bind(NEXT_WORD); { 5619 ldr(tmp1, Address(post(a1, wordSize))); 5620 ldr(tmp2, Address(post(a2, wordSize))); 5621 subs(cnt1, cnt1, elem_per_word); 5622 eor(tmp5, tmp1, tmp2); 5623 cbnz(tmp5, DONE); 5624 } br(GT, NEXT_WORD); 5625 // Last longword. In the case where length == 4 we compare the 5626 // same longword twice, but that's still faster than another 5627 // conditional branch. 5628 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5629 // length == 4. 5630 if (log_elem_size > 0) 5631 lsl(cnt1, cnt1, log_elem_size); 5632 ldr(tmp3, Address(a1, cnt1)); 5633 ldr(tmp4, Address(a2, cnt1)); 5634 eor(tmp5, tmp3, tmp4); 5635 cbnz(tmp5, DONE); 5636 b(SAME); 5637 bind(A_MIGHT_BE_NULL); 5638 // in case both a1 and a2 are not-null, proceed with loads 5639 cbz(a1, DONE); 5640 cbz(a2, DONE); 5641 b(A_IS_NOT_NULL); 5642 bind(SHORT); 5643 5644 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5645 { 5646 ldrw(tmp1, Address(post(a1, 4))); 5647 ldrw(tmp2, Address(post(a2, 4))); 5648 eorw(tmp5, tmp1, tmp2); 5649 cbnzw(tmp5, DONE); 5650 } 5651 bind(TAIL03); 5652 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5653 { 5654 ldrh(tmp3, Address(post(a1, 2))); 5655 ldrh(tmp4, Address(post(a2, 2))); 5656 eorw(tmp5, tmp3, tmp4); 5657 cbnzw(tmp5, DONE); 5658 } 5659 bind(TAIL01); 5660 if (elem_size == 1) { // Only needed when comparing byte arrays. 5661 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5662 { 5663 ldrb(tmp1, a1); 5664 ldrb(tmp2, a2); 5665 eorw(tmp5, tmp1, tmp2); 5666 cbnzw(tmp5, DONE); 5667 } 5668 } 5669 } else { 5670 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5671 CSET_EQ, LAST_CHECK; 5672 mov(result, false); 5673 cbz(a1, DONE); 5674 ldrw(cnt1, Address(a1, length_offset)); 5675 cbz(a2, DONE); 5676 // Increase loop counter by diff between base- and actual start-offset. 
5677 addw(cnt1, cnt1, extra_length); 5678 5679 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's 5680 // faster to perform another branch before comparing a1 and a2 5681 cmp(cnt1, (u1)elem_per_word); 5682 br(LE, SHORT); // short or same 5683 ldr(tmp3, Address(pre(a1, start_offset))); 5684 subs(zr, cnt1, stubBytesThreshold); 5685 br(GE, STUB); 5686 ldr(tmp4, Address(pre(a2, start_offset))); 5687 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5688 5689 // Main 16 byte comparison loop with 2 exits 5690 bind(NEXT_DWORD); { 5691 ldr(tmp1, Address(pre(a1, wordSize))); 5692 ldr(tmp2, Address(pre(a2, wordSize))); 5693 subs(cnt1, cnt1, 2 * elem_per_word); 5694 br(LE, TAIL); 5695 eor(tmp4, tmp3, tmp4); 5696 cbnz(tmp4, DONE); 5697 ldr(tmp3, Address(pre(a1, wordSize))); 5698 ldr(tmp4, Address(pre(a2, wordSize))); 5699 cmp(cnt1, (u1)elem_per_word); 5700 br(LE, TAIL2); 5701 cmp(tmp1, tmp2); 5702 } br(EQ, NEXT_DWORD); 5703 b(DONE); 5704 5705 bind(TAIL); 5706 eor(tmp4, tmp3, tmp4); 5707 eor(tmp2, tmp1, tmp2); 5708 lslv(tmp2, tmp2, tmp5); 5709 orr(tmp5, tmp4, tmp2); 5710 cmp(tmp5, zr); 5711 b(CSET_EQ); 5712 5713 bind(TAIL2); 5714 eor(tmp2, tmp1, tmp2); 5715 cbnz(tmp2, DONE); 5716 b(LAST_CHECK); 5717 5718 bind(STUB); 5719 ldr(tmp4, Address(pre(a2, start_offset))); 5720 if (elem_size == 2) { // convert to byte counter 5721 lsl(cnt1, cnt1, 1); 5722 } 5723 eor(tmp5, tmp3, tmp4); 5724 cbnz(tmp5, DONE); 5725 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 5726 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 5727 address tpc = trampoline_call(stub); 5728 if (tpc == nullptr) { 5729 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 5730 postcond(pc() == badAddress); 5731 return nullptr; 5732 } 5733 b(DONE); 5734 5735 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 5736 // so, if a2 == null => return false(0), else return true, so we can return a2 5737 mov(result, a2); 5738 b(DONE); 5739 bind(SHORT); 5740 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5741 ldr(tmp3, Address(a1, start_offset)); 5742 ldr(tmp4, Address(a2, start_offset)); 5743 bind(LAST_CHECK); 5744 eor(tmp4, tmp3, tmp4); 5745 lslv(tmp5, tmp4, tmp5); 5746 cmp(tmp5, zr); 5747 bind(CSET_EQ); 5748 cset(result, EQ); 5749 b(DONE); 5750 } 5751 5752 bind(SAME); 5753 mov(result, true); 5754 // That's it. 5755 bind(DONE); 5756 5757 BLOCK_COMMENT("} array_equals"); 5758 postcond(pc() != badAddress); 5759 return pc(); 5760 } 5761 5762 // Compare Strings 5763 5764 // For Strings we're passed the address of the first characters in a1 5765 // and a2 and the length in cnt1. 5766 // There are two implementations. For arrays >= 8 bytes, all 5767 // comparisons (including the final one, which may overlap) are 5768 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 5769 // halfword, then a short, and then a byte. 5770 5771 void MacroAssembler::string_equals(Register a1, Register a2, 5772 Register result, Register cnt1) 5773 { 5774 Label SAME, DONE, SHORT, NEXT_WORD; 5775 Register tmp1 = rscratch1; 5776 Register tmp2 = rscratch2; 5777 Register cnt2 = tmp2; // cnt2 only used in array length compare 5778 5779 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5780 5781 #ifndef PRODUCT 5782 { 5783 char comment[64]; 5784 snprintf(comment, sizeof comment, "{string_equalsL"); 5785 BLOCK_COMMENT(comment); 5786 } 5787 #endif 5788 5789 mov(result, false); 5790 5791 // Check for short strings, i.e. smaller than wordSize. 
5792 subs(cnt1, cnt1, wordSize); 5793 br(Assembler::LT, SHORT); 5794 // Main 8 byte comparison loop. 5795 bind(NEXT_WORD); { 5796 ldr(tmp1, Address(post(a1, wordSize))); 5797 ldr(tmp2, Address(post(a2, wordSize))); 5798 subs(cnt1, cnt1, wordSize); 5799 eor(tmp1, tmp1, tmp2); 5800 cbnz(tmp1, DONE); 5801 } br(GT, NEXT_WORD); 5802 // Last longword. In the case where length == 4 we compare the 5803 // same longword twice, but that's still faster than another 5804 // conditional branch. 5805 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5806 // length == 4. 5807 ldr(tmp1, Address(a1, cnt1)); 5808 ldr(tmp2, Address(a2, cnt1)); 5809 eor(tmp2, tmp1, tmp2); 5810 cbnz(tmp2, DONE); 5811 b(SAME); 5812 5813 bind(SHORT); 5814 Label TAIL03, TAIL01; 5815 5816 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5817 { 5818 ldrw(tmp1, Address(post(a1, 4))); 5819 ldrw(tmp2, Address(post(a2, 4))); 5820 eorw(tmp1, tmp1, tmp2); 5821 cbnzw(tmp1, DONE); 5822 } 5823 bind(TAIL03); 5824 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5825 { 5826 ldrh(tmp1, Address(post(a1, 2))); 5827 ldrh(tmp2, Address(post(a2, 2))); 5828 eorw(tmp1, tmp1, tmp2); 5829 cbnzw(tmp1, DONE); 5830 } 5831 bind(TAIL01); 5832 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5833 { 5834 ldrb(tmp1, a1); 5835 ldrb(tmp2, a2); 5836 eorw(tmp1, tmp1, tmp2); 5837 cbnzw(tmp1, DONE); 5838 } 5839 // Arrays are equal. 5840 bind(SAME); 5841 mov(result, true); 5842 5843 // That's it. 5844 bind(DONE); 5845 BLOCK_COMMENT("} string_equals"); 5846 } 5847 5848 5849 // The size of the blocks erased by the zero_blocks stub. We must 5850 // handle anything smaller than this ourselves in zero_words(). 5851 const int MacroAssembler::zero_words_block_size = 8; 5852 5853 // zero_words() is used by C2 ClearArray patterns and by 5854 // C1_MacroAssembler. It is as small as possible, handling small word 5855 // counts locally and delegating anything larger to the zero_blocks 5856 // stub. It is expanded many times in compiled code, so it is 5857 // important to keep it short. 5858 5859 // ptr: Address of a buffer to be zeroed. 5860 // cnt: Count in HeapWords. 5861 // 5862 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5863 address MacroAssembler::zero_words(Register ptr, Register cnt) 5864 { 5865 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5866 5867 BLOCK_COMMENT("zero_words {"); 5868 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5869 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5870 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5871 5872 subs(rscratch1, cnt, zero_words_block_size); 5873 Label around; 5874 br(LO, around); 5875 { 5876 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5877 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5878 // Make sure this is a C2 compilation. C1 allocates space only for 5879 // trampoline stubs generated by Call LIR ops, and in any case it 5880 // makes sense for a C1 compilation task to proceed as quickly as 5881 // possible. 
5882 CompileTask* task; 5883 if (StubRoutines::aarch64::complete() 5884 && Thread::current()->is_Compiler_thread() 5885 && (task = ciEnv::current()->task()) 5886 && is_c2_compile(task->comp_level())) { 5887 address tpc = trampoline_call(zero_blocks); 5888 if (tpc == nullptr) { 5889 DEBUG_ONLY(reset_labels(around)); 5890 return nullptr; 5891 } 5892 } else { 5893 far_call(zero_blocks); 5894 } 5895 } 5896 bind(around); 5897 5898 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5899 // for us. 5900 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5901 Label l; 5902 tbz(cnt, exact_log2(i), l); 5903 for (int j = 0; j < i; j += 2) { 5904 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5905 } 5906 bind(l); 5907 } 5908 { 5909 Label l; 5910 tbz(cnt, 0, l); 5911 str(zr, Address(ptr)); 5912 bind(l); 5913 } 5914 5915 BLOCK_COMMENT("} zero_words"); 5916 return pc(); 5917 } 5918 5919 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5920 // cnt: Immediate count in HeapWords. 5921 // 5922 // r10, r11, rscratch1, and rscratch2 are clobbered. 5923 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5924 { 5925 assert(wordSize <= BlockZeroingLowLimit, 5926 "increase BlockZeroingLowLimit"); 5927 address result = nullptr; 5928 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5929 #ifndef PRODUCT 5930 { 5931 char buf[64]; 5932 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5933 BLOCK_COMMENT(buf); 5934 } 5935 #endif 5936 if (cnt >= 16) { 5937 uint64_t loops = cnt/16; 5938 if (loops > 1) { 5939 mov(rscratch2, loops - 1); 5940 } 5941 { 5942 Label loop; 5943 bind(loop); 5944 for (int i = 0; i < 16; i += 2) { 5945 stp(zr, zr, Address(base, i * BytesPerWord)); 5946 } 5947 add(base, base, 16 * BytesPerWord); 5948 if (loops > 1) { 5949 subs(rscratch2, rscratch2, 1); 5950 br(GE, loop); 5951 } 5952 } 5953 } 5954 cnt %= 16; 5955 int i = cnt & 1; // store any odd word to start 5956 if (i) str(zr, Address(base)); 5957 for (; i < (int)cnt; i += 2) { 5958 stp(zr, zr, Address(base, i * wordSize)); 5959 } 5960 BLOCK_COMMENT("} zero_words"); 5961 result = pc(); 5962 } else { 5963 mov(r10, base); mov(r11, cnt); 5964 result = zero_words(r10, r11); 5965 } 5966 return result; 5967 } 5968 5969 // Zero blocks of memory by using DC ZVA. 5970 // 5971 // Aligns the base address first sufficiently for DC ZVA, then uses 5972 // DC ZVA repeatedly for every full block. cnt is the size to be 5973 // zeroed in HeapWords. Returns the count of words left to be zeroed 5974 // in cnt. 5975 // 5976 // NOTE: This is intended to be used in the zero_blocks() stub. If 5977 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 5978 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 5979 Register tmp = rscratch1; 5980 Register tmp2 = rscratch2; 5981 int zva_length = VM_Version::zva_length(); 5982 Label initial_table_end, loop_zva; 5983 Label fini; 5984 5985 // Base must be 16 byte aligned. If not just return and let caller handle it 5986 tst(base, 0x0f); 5987 br(Assembler::NE, fini); 5988 // Align base with ZVA length. 5989 neg(tmp, base); 5990 andr(tmp, tmp, zva_length - 1); 5991 5992 // tmp: the number of bytes to be filled to align the base with ZVA length. 
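  // Illustrative note: the adr/sub/br below branch into the middle of the
  // unrolled stp table that follows, so that exactly tmp bytes are zeroed with
  // plain stores before the DC ZVA loop takes over; each stp instruction is 4
  // bytes of code and zeroes 16 bytes of data, hence the LSR by 2.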
5993 add(base, base, tmp); 5994 sub(cnt, cnt, tmp, Assembler::ASR, 3); 5995 adr(tmp2, initial_table_end); 5996 sub(tmp2, tmp2, tmp, Assembler::LSR, 2); 5997 br(tmp2); 5998 5999 for (int i = -zva_length + 16; i < 0; i += 16) 6000 stp(zr, zr, Address(base, i)); 6001 bind(initial_table_end); 6002 6003 sub(cnt, cnt, zva_length >> 3); 6004 bind(loop_zva); 6005 dc(Assembler::ZVA, base); 6006 subs(cnt, cnt, zva_length >> 3); 6007 add(base, base, zva_length); 6008 br(Assembler::GE, loop_zva); 6009 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA 6010 bind(fini); 6011 } 6012 6013 // base: Address of a buffer to be filled, 8 bytes aligned. 6014 // cnt: Count in 8-byte unit. 6015 // value: Value to be filled with. 6016 // base will point to the end of the buffer after filling. 6017 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 6018 { 6019 // Algorithm: 6020 // 6021 // if (cnt == 0) { 6022 // return; 6023 // } 6024 // if ((p & 8) != 0) { 6025 // *p++ = v; 6026 // } 6027 // 6028 // scratch1 = cnt & 14; 6029 // cnt -= scratch1; 6030 // p += scratch1; 6031 // switch (scratch1 / 2) { 6032 // do { 6033 // cnt -= 16; 6034 // p[-16] = v; 6035 // p[-15] = v; 6036 // case 7: 6037 // p[-14] = v; 6038 // p[-13] = v; 6039 // case 6: 6040 // p[-12] = v; 6041 // p[-11] = v; 6042 // // ... 6043 // case 1: 6044 // p[-2] = v; 6045 // p[-1] = v; 6046 // case 0: 6047 // p += 16; 6048 // } while (cnt); 6049 // } 6050 // if ((cnt & 1) == 1) { 6051 // *p++ = v; 6052 // } 6053 6054 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 6055 6056 Label fini, skip, entry, loop; 6057 const int unroll = 8; // Number of stp instructions we'll unroll 6058 6059 cbz(cnt, fini); 6060 tbz(base, 3, skip); 6061 str(value, Address(post(base, 8))); 6062 sub(cnt, cnt, 1); 6063 bind(skip); 6064 6065 andr(rscratch1, cnt, (unroll-1) * 2); 6066 sub(cnt, cnt, rscratch1); 6067 add(base, base, rscratch1, Assembler::LSL, 3); 6068 adr(rscratch2, entry); 6069 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 6070 br(rscratch2); 6071 6072 bind(loop); 6073 add(base, base, unroll * 16); 6074 for (int i = -unroll; i < 0; i++) 6075 stp(value, value, Address(base, i * 16)); 6076 bind(entry); 6077 subs(cnt, cnt, unroll * 2); 6078 br(Assembler::GE, loop); 6079 6080 tbz(cnt, 0, fini); 6081 str(value, Address(post(base, 8))); 6082 bind(fini); 6083 } 6084 6085 // Intrinsic for 6086 // 6087 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray 6088 // return the number of characters copied. 6089 // - java/lang/StringUTF16.compress 6090 // return index of non-latin1 character if copy fails, otherwise 'len'. 6091 // 6092 // This version always returns the number of characters copied, and does not 6093 // clobber the 'len' register. A successful copy will complete with the post- 6094 // condition: 'res' == 'len', while an unsuccessful copy will exit with the 6095 // post-condition: 0 <= 'res' < 'len'. 6096 // 6097 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) has proven to 6098 // degrade performance (on Ampere Altra - Neoverse N1), to an extent 6099 // beyond the acceptable, even though the footprint would be smaller. 6100 // Using 'umaxv' in the ASCII-case comes with a small penalty but does 6101 // avoid additional bloat. 
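// For reference, the intended behaviour corresponds roughly to this scalar
// loop (illustrative sketch only):
//   int i = 0;
//   for (; i < len; i++) {
//     jchar c = src[i];
//     if (c > (ascii ? 0x7f : 0xff)) break;   // first non-encodable character
//     dst[i] = (jbyte)c;
//   }
//   res = i;   // == len on success, index of the offending character otherwise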
6102 // 6103 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6104 void MacroAssembler::encode_iso_array(Register src, Register dst, 6105 Register len, Register res, bool ascii, 6106 FloatRegister vtmp0, FloatRegister vtmp1, 6107 FloatRegister vtmp2, FloatRegister vtmp3, 6108 FloatRegister vtmp4, FloatRegister vtmp5) 6109 { 6110 Register cnt = res; 6111 Register max = rscratch1; 6112 Register chk = rscratch2; 6113 6114 prfm(Address(src), PLDL1STRM); 6115 movw(cnt, len); 6116 6117 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6118 6119 Label LOOP_32, DONE_32, FAIL_32; 6120 6121 BIND(LOOP_32); 6122 { 6123 cmpw(cnt, 32); 6124 br(LT, DONE_32); 6125 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6126 // Extract lower bytes. 6127 FloatRegister vlo0 = vtmp4; 6128 FloatRegister vlo1 = vtmp5; 6129 uzp1(vlo0, T16B, vtmp0, vtmp1); 6130 uzp1(vlo1, T16B, vtmp2, vtmp3); 6131 // Merge bits... 6132 orr(vtmp0, T16B, vtmp0, vtmp1); 6133 orr(vtmp2, T16B, vtmp2, vtmp3); 6134 // Extract merged upper bytes. 6135 FloatRegister vhix = vtmp0; 6136 uzp2(vhix, T16B, vtmp0, vtmp2); 6137 // ISO-check on hi-parts (all zero). 6138 // ASCII-check on lo-parts (no sign). 6139 FloatRegister vlox = vtmp1; // Merge lower bytes. 6140 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6141 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6142 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6143 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6144 ASCII(orr(chk, chk, max)); 6145 cbnz(chk, FAIL_32); 6146 subw(cnt, cnt, 32); 6147 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6148 b(LOOP_32); 6149 } 6150 BIND(FAIL_32); 6151 sub(src, src, 64); 6152 BIND(DONE_32); 6153 6154 Label LOOP_8, SKIP_8; 6155 6156 BIND(LOOP_8); 6157 { 6158 cmpw(cnt, 8); 6159 br(LT, SKIP_8); 6160 FloatRegister vhi = vtmp0; 6161 FloatRegister vlo = vtmp1; 6162 ld1(vtmp3, T8H, src); 6163 uzp1(vlo, T16B, vtmp3, vtmp3); 6164 uzp2(vhi, T16B, vtmp3, vtmp3); 6165 // ISO-check on hi-parts (all zero). 6166 // ASCII-check on lo-parts (no sign). 6167 ASCII(cm(LT, vtmp2, T16B, vlo)); 6168 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6169 ASCII(umov(max, vtmp2, B, 0)); 6170 ASCII(orr(chk, chk, max)); 6171 cbnz(chk, SKIP_8); 6172 6173 strd(vlo, Address(post(dst, 8))); 6174 subw(cnt, cnt, 8); 6175 add(src, src, 16); 6176 b(LOOP_8); 6177 } 6178 BIND(SKIP_8); 6179 6180 #undef ASCII 6181 6182 Label LOOP, DONE; 6183 6184 cbz(cnt, DONE); 6185 BIND(LOOP); 6186 { 6187 Register chr = rscratch1; 6188 ldrh(chr, Address(post(src, 2))); 6189 tst(chr, ascii ? 0xff80 : 0xff00); 6190 br(NE, DONE); 6191 strb(chr, Address(post(dst, 1))); 6192 subs(cnt, cnt, 1); 6193 br(GT, LOOP); 6194 } 6195 BIND(DONE); 6196 // Return index where we stopped. 6197 subw(res, len, cnt); 6198 } 6199 6200 // Inflate byte[] array to char[]. 6201 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6202 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6203 FloatRegister vtmp1, FloatRegister vtmp2, 6204 FloatRegister vtmp3, Register tmp4) { 6205 Label big, done, after_init, to_stub; 6206 6207 assert_different_registers(src, dst, len, tmp4, rscratch1); 6208 6209 fmovd(vtmp1, 0.0); 6210 lsrw(tmp4, len, 3); 6211 bind(after_init); 6212 cbnzw(tmp4, big); 6213 // Short string: less than 8 bytes. 6214 { 6215 Label loop, tiny; 6216 6217 cmpw(len, 4); 6218 br(LT, tiny); 6219 // Use SIMD to do 4 bytes. 
6220 ldrs(vtmp2, post(src, 4)); 6221 zip1(vtmp3, T8B, vtmp2, vtmp1); 6222 subw(len, len, 4); 6223 strd(vtmp3, post(dst, 8)); 6224 6225 cbzw(len, done); 6226 6227 // Do the remaining bytes by steam. 6228 bind(loop); 6229 ldrb(tmp4, post(src, 1)); 6230 strh(tmp4, post(dst, 2)); 6231 subw(len, len, 1); 6232 6233 bind(tiny); 6234 cbnz(len, loop); 6235 6236 b(done); 6237 } 6238 6239 if (SoftwarePrefetchHintDistance >= 0) { 6240 bind(to_stub); 6241 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6242 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6243 address tpc = trampoline_call(stub); 6244 if (tpc == nullptr) { 6245 DEBUG_ONLY(reset_labels(big, done)); 6246 postcond(pc() == badAddress); 6247 return nullptr; 6248 } 6249 b(after_init); 6250 } 6251 6252 // Unpack the bytes 8 at a time. 6253 bind(big); 6254 { 6255 Label loop, around, loop_last, loop_start; 6256 6257 if (SoftwarePrefetchHintDistance >= 0) { 6258 const int large_loop_threshold = (64 + 16)/8; 6259 ldrd(vtmp2, post(src, 8)); 6260 andw(len, len, 7); 6261 cmp(tmp4, (u1)large_loop_threshold); 6262 br(GE, to_stub); 6263 b(loop_start); 6264 6265 bind(loop); 6266 ldrd(vtmp2, post(src, 8)); 6267 bind(loop_start); 6268 subs(tmp4, tmp4, 1); 6269 br(EQ, loop_last); 6270 zip1(vtmp2, T16B, vtmp2, vtmp1); 6271 ldrd(vtmp3, post(src, 8)); 6272 st1(vtmp2, T8H, post(dst, 16)); 6273 subs(tmp4, tmp4, 1); 6274 zip1(vtmp3, T16B, vtmp3, vtmp1); 6275 st1(vtmp3, T8H, post(dst, 16)); 6276 br(NE, loop); 6277 b(around); 6278 bind(loop_last); 6279 zip1(vtmp2, T16B, vtmp2, vtmp1); 6280 st1(vtmp2, T8H, post(dst, 16)); 6281 bind(around); 6282 cbz(len, done); 6283 } else { 6284 andw(len, len, 7); 6285 bind(loop); 6286 ldrd(vtmp2, post(src, 8)); 6287 sub(tmp4, tmp4, 1); 6288 zip1(vtmp3, T16B, vtmp2, vtmp1); 6289 st1(vtmp3, T8H, post(dst, 16)); 6290 cbnz(tmp4, loop); 6291 } 6292 } 6293 6294 // Do the tail of up to 8 bytes. 6295 add(src, src, len); 6296 ldrd(vtmp3, Address(src, -8)); 6297 add(dst, dst, len, ext::uxtw, 1); 6298 zip1(vtmp3, T16B, vtmp3, vtmp1); 6299 strq(vtmp3, Address(dst, -16)); 6300 6301 bind(done); 6302 postcond(pc() != badAddress); 6303 return pc(); 6304 } 6305 6306 // Compress char[] array to byte[]. 6307 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6308 // Return the array length if every element in the array can be encoded; 6309 // otherwise, the index of the first non-latin1 (> 0xff) character. 6310 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6311 Register res, 6312 FloatRegister tmp0, FloatRegister tmp1, 6313 FloatRegister tmp2, FloatRegister tmp3, 6314 FloatRegister tmp4, FloatRegister tmp5) { 6315 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6316 } 6317 6318 // java.lang.Math.round(double a) 6319 // Returns the closest long to the argument, with ties rounding to 6320 // positive infinity. This requires some fiddling for corner 6321 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6322 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6323 FloatRegister ftmp) { 6324 Label DONE; 6325 BLOCK_COMMENT("java_round_double: { "); 6326 fmovd(rscratch1, src); 6327 // Use RoundToNearestTiesAway unless src small and -ve.
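  // Illustrative note: fcvtas rounds ties away from zero, which agrees with
  // Math.round (ties towards positive infinity) for non-negative inputs and
  // for negative inputs with no fractional part; the remaining case (small
  // negative values) is rerouted below through add-0.5-then-round-down.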
6328 fcvtasd(dst, src); 6329 // Test if src >= 0 || abs(src) >= 0x1.0p52 6330 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6331 mov(rscratch2, julong_cast(0x1.0p52)); 6332 cmp(rscratch1, rscratch2); 6333 br(HS, DONE); { 6334 // src < 0 && abs(src) < 0x1.0p52 6335 // src may have a fractional part, so add 0.5 6336 fmovd(ftmp, 0.5); 6337 faddd(ftmp, src, ftmp); 6338 // Convert double to jlong, use RoundTowardsNegative 6339 fcvtmsd(dst, ftmp); 6340 } 6341 bind(DONE); 6342 BLOCK_COMMENT("} java_round_double"); 6343 } 6344 6345 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6346 FloatRegister ftmp) { 6347 Label DONE; 6348 BLOCK_COMMENT("java_round_float: { "); 6349 fmovs(rscratch1, src); 6350 // Use RoundToNearestTiesAway unless src small and -ve. 6351 fcvtassw(dst, src); 6352 // Test if src >= 0 || abs(src) >= 0x1.0p23 6353 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6354 mov(rscratch2, jint_cast(0x1.0p23f)); 6355 cmp(rscratch1, rscratch2); 6356 br(HS, DONE); { 6357 // src < 0 && |src| < 0x1.0p23 6358 // src may have a fractional part, so add 0.5 6359 fmovs(ftmp, 0.5f); 6360 fadds(ftmp, src, ftmp); 6361 // Convert float to jint, use RoundTowardsNegative 6362 fcvtmssw(dst, ftmp); 6363 } 6364 bind(DONE); 6365 BLOCK_COMMENT("} java_round_float"); 6366 } 6367 6368 // get_thread() can be called anywhere inside generated code so we 6369 // need to save whatever non-callee save context might get clobbered 6370 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6371 // the call setup code. 6372 // 6373 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6374 // On other systems, the helper is a usual C function. 6375 // 6376 void MacroAssembler::get_thread(Register dst) { 6377 RegSet saved_regs = 6378 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6379 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6380 6381 protect_return_address(); 6382 push(saved_regs, sp); 6383 6384 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6385 blr(lr); 6386 if (dst != c_rarg0) { 6387 mov(dst, c_rarg0); 6388 } 6389 6390 pop(saved_regs, sp); 6391 authenticate_return_address(); 6392 } 6393 6394 void MacroAssembler::cache_wb(Address line) { 6395 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6396 assert(line.index() == noreg, "index should be noreg"); 6397 assert(line.offset() == 0, "offset should be 0"); 6398 // would like to assert this 6399 // assert(line._ext.shift == 0, "shift should be zero"); 6400 if (VM_Version::supports_dcpop()) { 6401 // writeback using clear virtual address to point of persistence 6402 dc(Assembler::CVAP, line.base()); 6403 } else { 6404 // no need to generate anything as Unsafe.writebackMemory should 6405 // never invoke this stub 6406 } 6407 } 6408 6409 void MacroAssembler::cache_wbsync(bool is_pre) { 6410 // we only need a barrier post sync 6411 if (!is_pre) { 6412 membar(Assembler::AnyAny); 6413 } 6414 } 6415 6416 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6417 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) { 6418 return; 6419 } 6420 // Make sure that native code does not change SVE vector length. 
6421 Label verify_ok; 6422 movw(tmp, zr); 6423 sve_inc(tmp, B); 6424 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 6425 br(EQ, verify_ok); 6426 stop("Error: SVE vector length has changed since jvm startup"); 6427 bind(verify_ok); 6428 } 6429 6430 void MacroAssembler::verify_ptrue() { 6431 Label verify_ok; 6432 if (!UseSVE) { 6433 return; 6434 } 6435 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 6436 sve_dec(rscratch1, B); 6437 cbz(rscratch1, verify_ok); 6438 stop("Error: the preserved predicate register (p7) elements are not all true"); 6439 bind(verify_ok); 6440 } 6441 6442 void MacroAssembler::safepoint_isb() { 6443 isb(); 6444 #ifndef PRODUCT 6445 if (VerifyCrossModifyFence) { 6446 // Clear the thread state. 6447 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6448 } 6449 #endif 6450 } 6451 6452 #ifndef PRODUCT 6453 void MacroAssembler::verify_cross_modify_fence_not_required() { 6454 if (VerifyCrossModifyFence) { 6455 // Check if thread needs a cross modify fence. 6456 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6457 Label fence_not_required; 6458 cbz(rscratch1, fence_not_required); 6459 // If it does then fail. 6460 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure))); 6461 mov(c_rarg0, rthread); 6462 blr(rscratch1); 6463 bind(fence_not_required); 6464 } 6465 } 6466 #endif 6467 6468 void MacroAssembler::spin_wait() { 6469 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 6470 switch (VM_Version::spin_wait_desc().inst()) { 6471 case SpinWait::NOP: 6472 nop(); 6473 break; 6474 case SpinWait::ISB: 6475 isb(); 6476 break; 6477 case SpinWait::YIELD: 6478 yield(); 6479 break; 6480 default: 6481 ShouldNotReachHere(); 6482 } 6483 } 6484 } 6485 6486 // Stack frame creation/removal 6487 6488 void MacroAssembler::enter(bool strip_ret_addr) { 6489 if (strip_ret_addr) { 6490 // Addresses can only be signed once. If there are multiple nested frames being created 6491 // in the same function, then the return address needs stripping first. 6492 strip_return_address(); 6493 } 6494 protect_return_address(); 6495 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6496 mov(rfp, sp); 6497 } 6498 6499 void MacroAssembler::leave() { 6500 mov(sp, rfp); 6501 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 6502 authenticate_return_address(); 6503 } 6504 6505 // ROP Protection 6506 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/ 6507 // destroying stack frames or whenever directly loading/storing the LR to memory. 6508 // If ROP protection is not set then these functions are no-ops. 6509 // For more details on PAC see pauth_aarch64.hpp. 6510 6511 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory. 6512 // Uses value zero as the modifier. 6513 // 6514 void MacroAssembler::protect_return_address() { 6515 if (VM_Version::use_rop_protection()) { 6516 check_return_address(); 6517 paciaz(); 6518 } 6519 } 6520 6521 // Sign the return value in the given register. Use before updating the LR in the existing stack 6522 // frame for the current function. 6523 // Uses value zero as the modifier. 6524 // 6525 void MacroAssembler::protect_return_address(Register return_reg) { 6526 if (VM_Version::use_rop_protection()) { 6527 check_return_address(return_reg); 6528 paciza(return_reg); 6529 } 6530 } 6531 6532 // Authenticate the LR. 
Use before function return, after restoring FP and loading LR from memory. 6533 // Uses value zero as the modifier. 6534 // 6535 void MacroAssembler::authenticate_return_address() { 6536 if (VM_Version::use_rop_protection()) { 6537 autiaz(); 6538 check_return_address(); 6539 } 6540 } 6541 6542 // Authenticate the return value in the given register. Use before updating the LR in the existing 6543 // stack frame for the current function. 6544 // Uses value zero as the modifier. 6545 // 6546 void MacroAssembler::authenticate_return_address(Register return_reg) { 6547 if (VM_Version::use_rop_protection()) { 6548 autiza(return_reg); 6549 check_return_address(return_reg); 6550 } 6551 } 6552 6553 // Strip any PAC data from LR without performing any authentication. Use with caution - only if 6554 // there is no guaranteed way of authenticating the LR. 6555 // 6556 void MacroAssembler::strip_return_address() { 6557 if (VM_Version::use_rop_protection()) { 6558 xpaclri(); 6559 } 6560 } 6561 6562 #ifndef PRODUCT 6563 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only 6564 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point 6565 // it is difficult to debug back to the callee function. 6566 // This function simply loads from the address in the given register. 6567 // Use directly after authentication to catch authentication failures. 6568 // Also use before signing to check that the pointer is valid and hasn't already been signed. 6569 // 6570 void MacroAssembler::check_return_address(Register return_reg) { 6571 if (VM_Version::use_rop_protection()) { 6572 ldr(zr, Address(return_reg)); 6573 } 6574 } 6575 #endif 6576 6577 // The java_calling_convention describes stack locations as ideal slots on 6578 // a frame with no abi restrictions. Since we must observe abi restrictions 6579 // (like the placement of the register window) the slots must be biased by 6580 // the following value. 6581 static int reg2offset_in(VMReg r) { 6582 // Account for saved rfp and lr 6583 // This should really be in_preserve_stack_slots 6584 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; 6585 } 6586 6587 static int reg2offset_out(VMReg r) { 6588 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 6589 } 6590 6591 // On 64bit we will store integer like items to the stack as 6592 // 64bits items (AArch64 ABI) even though java would only store 6593 // 32bits for a parameter. On 32bit it will simply be 32bits 6594 // So this routine will do 32->32 on 32bit and 32->64 on 64bit 6595 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) { 6596 if (src.first()->is_stack()) { 6597 if (dst.first()->is_stack()) { 6598 // stack to stack 6599 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6600 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6601 } else { 6602 // stack to reg 6603 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6604 } 6605 } else if (dst.first()->is_stack()) { 6606 // reg to stack 6607 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6608 } else { 6609 if (dst.first() != src.first()) { 6610 sxtw(dst.first()->as_Register(), src.first()->as_Register()); 6611 } 6612 } 6613 } 6614 6615 // An oop arg. 
Must pass a handle, not the oop itself 6616 void MacroAssembler::object_move( 6617 OopMap* map, 6618 int oop_handle_offset, 6619 int framesize_in_slots, 6620 VMRegPair src, 6621 VMRegPair dst, 6622 bool is_receiver, 6623 int* receiver_offset) { 6624 6625 // must pass a handle. First figure out the location we use as a handle 6626 6627 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register(); 6628 6629 // See if the oop is null; if it is, we need no handle 6630 6631 if (src.first()->is_stack()) { 6632 6633 // Oop is already on the stack as an argument 6634 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 6635 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 6636 if (is_receiver) { 6637 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 6638 } 6639 6640 ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 6641 lea(rHandle, Address(rfp, reg2offset_in(src.first()))); 6642 // conditionally move a null 6643 cmp(rscratch1, zr); 6644 csel(rHandle, zr, rHandle, Assembler::EQ); 6645 } else { 6646 6647 // Oop is in a register; we must store it to the space we reserve 6648 // on the stack for oop_handles and pass a handle if the oop is non-null 6649 6650 const Register rOop = src.first()->as_Register(); 6651 int oop_slot; 6652 if (rOop == j_rarg0) 6653 oop_slot = 0; 6654 else if (rOop == j_rarg1) 6655 oop_slot = 1; 6656 else if (rOop == j_rarg2) 6657 oop_slot = 2; 6658 else if (rOop == j_rarg3) 6659 oop_slot = 3; 6660 else if (rOop == j_rarg4) 6661 oop_slot = 4; 6662 else if (rOop == j_rarg5) 6663 oop_slot = 5; 6664 else if (rOop == j_rarg6) 6665 oop_slot = 6; 6666 else { 6667 assert(rOop == j_rarg7, "wrong register"); 6668 oop_slot = 7; 6669 } 6670 6671 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 6672 int offset = oop_slot*VMRegImpl::stack_slot_size; 6673 6674 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 6675 // Store oop in handle area, may be null 6676 str(rOop, Address(sp, offset)); 6677 if (is_receiver) { 6678 *receiver_offset = offset; 6679 } 6680 6681 cmp(rOop, zr); 6682 lea(rHandle, Address(sp, offset)); 6683 // conditionally move a null 6684 csel(rHandle, zr, rHandle, Assembler::EQ); 6685 } 6686 6687 // If the arg is on the stack then place it; otherwise it is already in the correct reg.
6688 if (dst.first()->is_stack()) { 6689 str(rHandle, Address(sp, reg2offset_out(dst.first()))); 6690 } 6691 } 6692 6693 // A float arg may have to do a float reg to int reg conversion 6694 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) { 6695 if (src.first()->is_stack()) { 6696 if (dst.first()->is_stack()) { 6697 ldrw(tmp, Address(rfp, reg2offset_in(src.first()))); 6698 strw(tmp, Address(sp, reg2offset_out(dst.first()))); 6699 } else { 6700 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6701 } 6702 } else if (src.first() != dst.first()) { 6703 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6704 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6705 else 6706 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6707 } 6708 } 6709 6710 // A long move 6711 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) { 6712 if (src.first()->is_stack()) { 6713 if (dst.first()->is_stack()) { 6714 // stack to stack 6715 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6716 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6717 } else { 6718 // stack to reg 6719 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6720 } 6721 } else if (dst.first()->is_stack()) { 6722 // reg to stack 6723 // Do we really have to sign extend??? 6724 // __ movslq(src.first()->as_Register(), src.first()->as_Register()); 6725 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6726 } else { 6727 if (dst.first() != src.first()) { 6728 mov(dst.first()->as_Register(), src.first()->as_Register()); 6729 } 6730 } 6731 } 6732 6733 6734 // A double move 6735 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) { 6736 if (src.first()->is_stack()) { 6737 if (dst.first()->is_stack()) { 6738 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6739 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6740 } else { 6741 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6742 } 6743 } else if (src.first() != dst.first()) { 6744 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6745 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6746 else 6747 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6748 } 6749 } 6750 6751 // Implements lightweight-locking. 6752 // 6753 // - obj: the object to be locked 6754 // - t1, t2, t3: temporary registers, will be destroyed 6755 // - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding). 6756 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) { 6757 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); 6758 assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1); 6759 6760 Label push; 6761 const Register top = t1; 6762 const Register mark = t2; 6763 const Register t = t3; 6764 6765 // Preload the markWord. It is important that this is the first 6766 // instruction emitted as it is part of C1's null check semantics. 6767 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); 6768 6769 if (UseObjectMonitorTable) { 6770 // Clear cache in case fast locking succeeds. 6771 str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes())))); 6772 } 6773 6774 // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
//
// - obj: the object to be unlocked
// - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
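  // Only debug builds re-store the oop below: the pop above clears the slot
  // only under DEBUG_ONLY, so in product builds the old entry is still in
  // place and restoring top is sufficient.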
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}
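
// Illustrative sketch (comment only, not part of the generated code): the
// pseudo-C below is an informal rendering of the fast paths emitted above,
// assuming the lock-stack is an oop array inside the JavaThread and 'top'
// is a byte offset from the thread base to its next free slot. Names such
// as CAS and stack[] are placeholders for illustration, not HotSpot APIs.
//
//   lock(obj):
//     mark = obj->mark;
//     if (top == end_offset)            goto slow;   // lock-stack full
//     if (stack[top - oopSize] == obj)  goto push;   // recursive case
//     if (mark & monitor_value)         goto slow;   // already inflated
//     // try to flip the lock bits 0b01 (unlocked) -> 0b00 (locked)
//     expected = mark | unlocked_value;
//     if (!CAS(&obj->mark, expected, expected ^ unlocked_value)) goto slow;
//   push:
//     stack[top] = obj; top += oopSize;
//
//   unlock(obj):
//     if (stack[top - oopSize] != obj)  goto slow;
//     top -= oopSize;                                // pop
//     if (stack[top - oopSize] == obj)  return;      // recursive case
//     mark = obj->mark;
//     if (mark & monitor_value)         goto push_and_slow;
//     // try to flip the lock bits 0b00 (locked) -> 0b01 (unlocked)
//     if (CAS(&obj->mark, mark, mark | unlocked_value)) return;
//   push_and_slow:
//     top += oopSize; goto slow;                     // re-push, defer to runtime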