/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                 strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
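// For example (an illustrative decode): "b.eq ." + 8 encodes as 0x54000040;
// insn[30:25] == 0b101010, so RelocActions::run() below dispatches to
// conditionalBranch(), and sextract(insn, 23, 5) == 2 gives
// target == insn_addr + (2 << 2).
//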
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
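  // ADRP works in units of 4k pages: the inner instruction (ldr/str, add or
  // movk) absorbs the low bits of the target first, and the remaining signed
  // page delta is then encoded into the ADRP itself -- the low two bits into
  // insn[30:29], the rest into insn[23:5]. For example (illustrative), an
  // ADRP at 0x10000 whose target lands in page 0x25 encodes a delta of 0x15.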
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
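// For example (illustrative): for the pair "adrp x8, Page; ldr x0, [x8, #16]"
// the LDR's unsigned-immediate field holds 16 >> 3 == 2 and its size field is
// 0b11, so offset_for() returns byte_offset == 2 << 3 == 16.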
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Decoder::adrpMovk_impl; }

public:
  Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}
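
// For example (illustrative): applied to the movptr() sequence
//   movz x8, #0x89ab
//   movk x8, #0x4567, lsl #16
//   movk x8, #0x0123, lsl #32
// the Decoder above reassembles target == 0x0123456789ab.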

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}
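
// For example (illustrative): the narrow value 0x00081234 is encoded as
//   movz x8, #0x0008, lsl #16
//   movk x8, #0x1234
// i.e. the upper half-word lives in the leading movz (hw == 01) and the
// lower half-word in the trailing movk, matching the patches above.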

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & resp of the last Java frame have to be
// recorded in the (thread-local) JavaThread object. When leaving C land,
// the last Java fp has to be reset to 0. This is required to allow proper
// stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}
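
// For example (illustrative): a plain bl reaches +/-128MB (imm26 scaled by
// 4 bytes), while the far form "adrp tmp, dest; add tmp, tmp, #lo12; blr tmp"
// used above reaches +/-4GB and so covers any permitted code cache.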
int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_compiled(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)
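//
// The stub's layout is (illustrative):
//    0: ldr  rscratch1, +8   ; load the destination from the pool word below
//    4: br   rscratch1       ; LR is left untouched
//    8: .quad <destination>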
916 // 917 // code sequences: 918 // 919 // call-site: 920 // branch-and-link to <destination> or <trampoline stub> 921 // 922 // Related trampoline stub for this call site in the stub section: 923 // load the call target from the constant pool 924 // branch (LR still points to the call site above) 925 926 address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset, 927 address dest) { 928 // Max stub size: alignment nop, TrampolineStub. 929 address stub = start_a_stub(max_trampoline_stub_size()); 930 if (stub == nullptr) { 931 return nullptr; // CodeBuffer::expand failed 932 } 933 934 // Create a trampoline stub relocation which relates this trampoline stub 935 // with the call instruction at insts_call_instruction_offset in the 936 // instructions code-section. 937 align(wordSize); 938 relocate(trampoline_stub_Relocation::spec(code()->insts()->start() 939 + insts_call_instruction_offset)); 940 const int stub_start_offset = offset(); 941 942 // Now, create the trampoline stub's code: 943 // - load the call 944 // - call 945 Label target; 946 ldr(rscratch1, target); 947 br(rscratch1); 948 bind(target); 949 assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset, 950 "should be"); 951 emit_int64((int64_t)dest); 952 953 const address stub_start_addr = addr_at(stub_start_offset); 954 955 assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline"); 956 957 end_a_stub(); 958 return stub_start_addr; 959 } 960 961 int MacroAssembler::max_trampoline_stub_size() { 962 // Max stub size: alignment nop, TrampolineStub. 963 return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size; 964 } 965 966 void MacroAssembler::emit_static_call_stub() { 967 // CompiledDirectStaticCall::set_to_interpreted knows the 968 // exact layout of this stub. 969 970 isb(); 971 mov_metadata(rmethod, nullptr); 972 973 // Jump to the entry point of the c2i stub. 974 movptr(rscratch1, 0); 975 br(rscratch1); 976 } 977 978 int MacroAssembler::static_call_stub_size() { 979 // isb; movk; movz; movz; movk; movz; movz; br 980 return 8 * NativeInstruction::instruction_size; 981 } 982 983 void MacroAssembler::c2bool(Register x) { 984 // implements x == 0 ? 0 : 1 985 // note: must only look at least-significant byte of x 986 // since C-style booleans are stored in one byte 987 // only! 

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}
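
// For example (an illustrative case): a check against a concrete class whose
// depth fits the primary-super display is decided entirely by the single
// ldr/cmp above, while a check against an interface loads sc_offset and is
// referred to the secondary-supers scan in check_klass_subtype_slow_path().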

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

  // Get super_klass value into r0 (even if it was in r5 or r2).
  if (super_klass != r0) {
    mov(r0, super_klass);
  }

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}

void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
  assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
  assert_different_registers(klass, rthread, scratch);

  Label L_fallthrough, L_tmp;
  if (L_fast_path == nullptr) {
    L_fast_path = &L_fallthrough;
  } else if (L_slow_path == nullptr) {
    L_slow_path = &L_fallthrough;
  }
  // Fast path check: class is fully initialized
  ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
  subs(zr, scratch, InstanceKlass::fully_initialized);
  br(Assembler::EQ, *L_fast_path);

  // Fast path check: current thread is initializer thread
  ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
  cmp(rthread, scratch);

  if (L_slow_path == &L_fallthrough) {
    br(Assembler::EQ, *L_fast_path);
    bind(*L_slow_path);
  } else if (L_fast_path == &L_fallthrough) {
    br(Assembler::NE, *L_slow_path);
    bind(*L_fast_path);
  } else {
    Unimplemented();
  }
}

void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  strip_return_address(); // This might happen within a stack frame.
  protect_return_address();
  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  movptr(rscratch1, (uintptr_t)(address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
  if (!VerifyOops) return;

  const char* b = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  strip_return_address(); // This might happen within a stack frame.
  protect_return_address();
  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
  }
  movptr(rscratch1, (uintptr_t)(address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();

  BLOCK_COMMENT("} verify_oop_addr");
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  mov(rscratch1, entry_point);
  blr(rscratch1);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
                                  Register arg_1, Register arg_2) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  pass_arg2(this, arg_2);
  call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
arg_1); 1621 pass_arg0(this, arg_0); 1622 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1623 } 1624 1625 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1626 assert(arg_0 != c_rarg3, "smashed arg"); 1627 assert(arg_1 != c_rarg3, "smashed arg"); 1628 assert(arg_2 != c_rarg3, "smashed arg"); 1629 pass_arg3(this, arg_3); 1630 assert(arg_0 != c_rarg2, "smashed arg"); 1631 assert(arg_1 != c_rarg2, "smashed arg"); 1632 pass_arg2(this, arg_2); 1633 assert(arg_0 != c_rarg1, "smashed arg"); 1634 pass_arg1(this, arg_1); 1635 pass_arg0(this, arg_0); 1636 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1637 } 1638 1639 void MacroAssembler::null_check(Register reg, int offset) { 1640 if (needs_explicit_null_check(offset)) { 1641 // provoke OS null exception if reg is null by 1642 // accessing M[reg] w/o changing any registers 1643 // NOTE: this is plenty to provoke a segv 1644 ldr(zr, Address(reg)); 1645 } else { 1646 // nothing to do, (later) access of M[reg + offset] 1647 // will provoke OS null exception if reg is null 1648 } 1649 } 1650 1651 // MacroAssembler protected routines needed to implement 1652 // public methods 1653 1654 void MacroAssembler::mov(Register r, Address dest) { 1655 code_section()->relocate(pc(), dest.rspec()); 1656 uint64_t imm64 = (uint64_t)dest.target(); 1657 movptr(r, imm64); 1658 } 1659 1660 // Move a constant pointer into r. In AArch64 mode the virtual 1661 // address space is 48 bits in size, so we only need three 1662 // instructions to create a patchable instruction sequence that can 1663 // reach anywhere. 1664 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 1665 #ifndef PRODUCT 1666 { 1667 char buffer[64]; 1668 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 1669 block_comment(buffer); 1670 } 1671 #endif 1672 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 1673 movz(r, imm64 & 0xffff); 1674 imm64 >>= 16; 1675 movk(r, imm64 & 0xffff, 16); 1676 imm64 >>= 16; 1677 movk(r, imm64 & 0xffff, 32); 1678 } 1679 1680 // Macro to mov replicated immediate to vector register. 1681 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 1682 // the upper 56/48/32 bits must be zeros for B/H/S type. 
1683 // Vd will get the following values for different arrangements in T 1684 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 1685 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 1686 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 1687 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 1688 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 1689 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 1690 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 1691 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 1692 // Clobbers rscratch1 1693 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 1694 assert(T != T1Q, "unsupported"); 1695 if (T == T1D || T == T2D) { 1696 int imm = operand_valid_for_movi_immediate(imm64, T); 1697 if (-1 != imm) { 1698 movi(Vd, T, imm); 1699 } else { 1700 mov(rscratch1, imm64); 1701 dup(Vd, T, rscratch1); 1702 } 1703 return; 1704 } 1705 1706 #ifdef ASSERT 1707 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 1708 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 1709 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 1710 #endif 1711 int shift = operand_valid_for_movi_immediate(imm64, T); 1712 uint32_t imm32 = imm64 & 0xffffffffULL; 1713 if (shift >= 0) { 1714 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 1715 } else { 1716 movw(rscratch1, imm32); 1717 dup(Vd, T, rscratch1); 1718 } 1719 } 1720 1721 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 1722 { 1723 #ifndef PRODUCT 1724 { 1725 char buffer[64]; 1726 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 1727 block_comment(buffer); 1728 } 1729 #endif 1730 if (operand_valid_for_logical_immediate(false, imm64)) { 1731 orr(dst, zr, imm64); 1732 } else { 1733 // we can use a combination of MOVZ or MOVN with 1734 // MOVK to build up the constant 1735 uint64_t imm_h[4]; 1736 int zero_count = 0; 1737 int neg_count = 0; 1738 int i; 1739 for (i = 0; i < 4; i++) { 1740 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 1741 if (imm_h[i] == 0) { 1742 zero_count++; 1743 } else if (imm_h[i] == 0xffffL) { 1744 neg_count++; 1745 } 1746 } 1747 if (zero_count == 4) { 1748 // one MOVZ will do 1749 movz(dst, 0); 1750 } else if (neg_count == 4) { 1751 // one MOVN will do 1752 movn(dst, 0); 1753 } else if (zero_count == 3) { 1754 for (i = 0; i < 4; i++) { 1755 if (imm_h[i] != 0L) { 1756 movz(dst, (uint32_t)imm_h[i], (i << 4)); 1757 break; 1758 } 1759 } 1760 } else if (neg_count == 3) { 1761 // one MOVN will do 1762 for (int i = 0; i < 4; i++) { 1763 if (imm_h[i] != 0xffffL) { 1764 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1765 break; 1766 } 1767 } 1768 } else if (zero_count == 2) { 1769 // one MOVZ and one MOVK will do 1770 for (i = 0; i < 3; i++) { 1771 if (imm_h[i] != 0L) { 1772 movz(dst, (uint32_t)imm_h[i], (i << 4)); 1773 i++; 1774 break; 1775 } 1776 } 1777 for (;i < 4; i++) { 1778 if (imm_h[i] != 0L) { 1779 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1780 } 1781 } 1782 } else if (neg_count == 2) { 1783 // one MOVN and one MOVK will do 1784 for (i = 0; i < 4; i++) { 1785 if (imm_h[i] != 0xffffL) { 1786 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1787 i++; 1788 break; 1789 } 1790 } 1791 for (;i < 4; i++) { 1792 if (imm_h[i] != 0xffffL) { 1793 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1794 } 1795 } 1796 } else if (zero_count == 1) { 1797 // one MOVZ and two MOVKs will do 1798 for (i = 
0; i < 4; i++) {
1799         if (imm_h[i] != 0L) {
1800           movz(dst, (uint32_t)imm_h[i], (i << 4));
1801           i++;
1802           break;
1803         }
1804       }
1805       for (;i < 4; i++) {
1806         if (imm_h[i] != 0L) {
1807           movk(dst, (uint32_t)imm_h[i], (i << 4));
1808         }
1809       }
1810     } else if (neg_count == 1) {
1811       // one MOVN and two MOVKs will do
1812       for (i = 0; i < 4; i++) {
1813         if (imm_h[i] != 0xffffL) {
1814           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1815           i++;
1816           break;
1817         }
1818       }
1819       for (;i < 4; i++) {
1820         if (imm_h[i] != 0xffffL) {
1821           movk(dst, (uint32_t)imm_h[i], (i << 4));
1822         }
1823       }
1824     } else {
1825       // use a MOVZ and 3 MOVKs (makes it easier to debug)
1826       movz(dst, (uint32_t)imm_h[0], 0);
1827       for (i = 1; i < 4; i++) {
1828         movk(dst, (uint32_t)imm_h[i], (i << 4));
1829       }
1830     }
1831   }
1832 }
1833
1834 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
1835 {
1836 #ifndef PRODUCT
1837   {
1838     char buffer[64];
1839     snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
1840     block_comment(buffer);
1841   }
1842 #endif
1843   if (operand_valid_for_logical_immediate(true, imm32)) {
1844     orrw(dst, zr, imm32);
1845   } else {
1846     // we can use a MOVZ or a MOVN, possibly followed by a single MOVK,
1847     // to build up the constant
1848     uint32_t imm_h[2];
1849     imm_h[0] = imm32 & 0xffff;
1850     imm_h[1] = ((imm32 >> 16) & 0xffff);
1851     if (imm_h[0] == 0) {
1852       movzw(dst, imm_h[1], 16);
1853     } else if (imm_h[0] == 0xffff) {
1854       movnw(dst, imm_h[1] ^ 0xffff, 16);
1855     } else if (imm_h[1] == 0) {
1856       movzw(dst, imm_h[0], 0);
1857     } else if (imm_h[1] == 0xffff) {
1858       movnw(dst, imm_h[0] ^ 0xffff, 0);
1859     } else {
1860       // use a MOVZ and MOVK (makes it easier to debug)
1861       movzw(dst, imm_h[0], 0);
1862       movkw(dst, imm_h[1], 16);
1863     }
1864   }
1865 }
1866
1867 // Form an address from base + offset in Rd. Rd may or may
1868 // not actually be used: you must use the Address that is returned.
1869 // It is up to you to ensure that the shift provided matches the size
1870 // of your data.
1871 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
1872   if (Address::offset_ok_for_immed(byte_offset, shift))
1873     // It fits; no need for any heroics
1874     return Address(base, byte_offset);
1875
1876   // Don't do anything clever with negative or misaligned offsets
1877   unsigned mask = (1 << shift) - 1;
1878   if (byte_offset < 0 || byte_offset & mask) {
1879     mov(Rd, byte_offset);
1880     add(Rd, base, Rd);
1881     return Address(Rd);
1882   }
1883
1884   // See if we can do this with two 12-bit offsets
1885   {
1886     uint64_t word_offset = byte_offset >> shift;
1887     uint64_t masked_offset = word_offset & 0xfff000;
1888     if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
1889         && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
1890       add(Rd, base, masked_offset << shift);
1891       word_offset -= masked_offset;
1892       return Address(Rd, word_offset << shift);
1893     }
1894   }
1895
1896   // Do it the hard way
1897   mov(Rd, byte_offset);
1898   add(Rd, base, Rd);
1899   return Address(Rd);
1900 }
1901
1902 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
1903                                     bool want_remainder, Register scratch)
1904 {
1905   // Full implementation of Java idiv and irem. The function
1906   // returns the (pc) offset of the div instruction - may be needed
1907   // for implicit exceptions.
1908 // 1909 // constraint : ra/rb =/= scratch 1910 // normal case 1911 // 1912 // input : ra: dividend 1913 // rb: divisor 1914 // 1915 // result: either 1916 // quotient (= ra idiv rb) 1917 // remainder (= ra irem rb) 1918 1919 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 1920 1921 int idivl_offset = offset(); 1922 if (! want_remainder) { 1923 sdivw(result, ra, rb); 1924 } else { 1925 sdivw(scratch, ra, rb); 1926 Assembler::msubw(result, scratch, rb, ra); 1927 } 1928 1929 return idivl_offset; 1930 } 1931 1932 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 1933 bool want_remainder, Register scratch) 1934 { 1935 // Full implementation of Java ldiv and lrem. The function 1936 // returns the (pc) offset of the div instruction - may be needed 1937 // for implicit exceptions. 1938 // 1939 // constraint : ra/rb =/= scratch 1940 // normal case 1941 // 1942 // input : ra: dividend 1943 // rb: divisor 1944 // 1945 // result: either 1946 // quotient (= ra idiv rb) 1947 // remainder (= ra irem rb) 1948 1949 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 1950 1951 int idivq_offset = offset(); 1952 if (! want_remainder) { 1953 sdiv(result, ra, rb); 1954 } else { 1955 sdiv(scratch, ra, rb); 1956 Assembler::msub(result, scratch, rb, ra); 1957 } 1958 1959 return idivq_offset; 1960 } 1961 1962 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 1963 address prev = pc() - NativeMembar::instruction_size; 1964 address last = code()->last_insn(); 1965 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 1966 NativeMembar *bar = NativeMembar_at(prev); 1967 // We are merging two memory barrier instructions. On AArch64 we 1968 // can do this simply by ORing them together. 1969 bar->set_kind(bar->get_kind() | order_constraint); 1970 BLOCK_COMMENT("merged membar"); 1971 } else { 1972 code()->set_last_insn(pc()); 1973 dmb(Assembler::barrier(order_constraint)); 1974 } 1975 } 1976 1977 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 1978 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 1979 merge_ldst(rt, adr, size_in_bytes, is_store); 1980 code()->clear_last_insn(); 1981 return true; 1982 } else { 1983 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 1984 const uint64_t mask = size_in_bytes - 1; 1985 if (adr.getMode() == Address::base_plus_offset && 1986 (adr.offset() & mask) == 0) { // only supports base_plus_offset. 1987 code()->set_last_insn(pc()); 1988 } 1989 return false; 1990 } 1991 } 1992 1993 void MacroAssembler::ldr(Register Rx, const Address &adr) { 1994 // We always try to merge two adjacent loads into one ldp. 1995 if (!try_merge_ldst(Rx, adr, 8, false)) { 1996 Assembler::ldr(Rx, adr); 1997 } 1998 } 1999 2000 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2001 // We always try to merge two adjacent loads into one ldp. 2002 if (!try_merge_ldst(Rw, adr, 4, false)) { 2003 Assembler::ldrw(Rw, adr); 2004 } 2005 } 2006 2007 void MacroAssembler::str(Register Rx, const Address &adr) { 2008 // We always try to merge two adjacent stores into one stp. 2009 if (!try_merge_ldst(Rx, adr, 8, true)) { 2010 Assembler::str(Rx, adr); 2011 } 2012 } 2013 2014 void MacroAssembler::strw(Register Rw, const Address &adr) { 2015 // We always try to merge two adjacent stores into one stp. 
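// Illustration (a sketch, not literal emitted output): if the previous
// instruction was strw w1, [sp, #16] and this call is
// strw(w2, Address(sp, 20)), the two adjacent 4-byte stores are
// rewritten as a single store pair of w1/w2 at [sp, #16] (see merge_ldst).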
2016 if (!try_merge_ldst(Rw, adr, 4, true)) { 2017 Assembler::strw(Rw, adr); 2018 } 2019 } 2020 2021 // MacroAssembler routines found actually to be needed 2022 2023 void MacroAssembler::push(Register src) 2024 { 2025 str(src, Address(pre(esp, -1 * wordSize))); 2026 } 2027 2028 void MacroAssembler::pop(Register dst) 2029 { 2030 ldr(dst, Address(post(esp, 1 * wordSize))); 2031 } 2032 2033 // Note: load_unsigned_short used to be called load_unsigned_word. 2034 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2035 int off = offset(); 2036 ldrh(dst, src); 2037 return off; 2038 } 2039 2040 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2041 int off = offset(); 2042 ldrb(dst, src); 2043 return off; 2044 } 2045 2046 int MacroAssembler::load_signed_short(Register dst, Address src) { 2047 int off = offset(); 2048 ldrsh(dst, src); 2049 return off; 2050 } 2051 2052 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2053 int off = offset(); 2054 ldrsb(dst, src); 2055 return off; 2056 } 2057 2058 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2059 int off = offset(); 2060 ldrshw(dst, src); 2061 return off; 2062 } 2063 2064 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2065 int off = offset(); 2066 ldrsbw(dst, src); 2067 return off; 2068 } 2069 2070 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2071 switch (size_in_bytes) { 2072 case 8: ldr(dst, src); break; 2073 case 4: ldrw(dst, src); break; 2074 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2075 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2076 default: ShouldNotReachHere(); 2077 } 2078 } 2079 2080 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2081 switch (size_in_bytes) { 2082 case 8: str(src, dst); break; 2083 case 4: strw(src, dst); break; 2084 case 2: strh(src, dst); break; 2085 case 1: strb(src, dst); break; 2086 default: ShouldNotReachHere(); 2087 } 2088 } 2089 2090 void MacroAssembler::decrementw(Register reg, int value) 2091 { 2092 if (value < 0) { incrementw(reg, -value); return; } 2093 if (value == 0) { return; } 2094 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2095 /* else */ { 2096 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2097 movw(rscratch2, (unsigned)value); 2098 subw(reg, reg, rscratch2); 2099 } 2100 } 2101 2102 void MacroAssembler::decrement(Register reg, int value) 2103 { 2104 if (value < 0) { increment(reg, -value); return; } 2105 if (value == 0) { return; } 2106 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2107 /* else */ { 2108 assert(reg != rscratch2, "invalid dst for register decrement"); 2109 mov(rscratch2, (uint64_t)value); 2110 sub(reg, reg, rscratch2); 2111 } 2112 } 2113 2114 void MacroAssembler::decrementw(Address dst, int value) 2115 { 2116 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2117 if (dst.getMode() == Address::literal) { 2118 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2119 lea(rscratch2, dst); 2120 dst = Address(rscratch2); 2121 } 2122 ldrw(rscratch1, dst); 2123 decrementw(rscratch1, value); 2124 strw(rscratch1, dst); 2125 } 2126 2127 void MacroAssembler::decrement(Address dst, int value) 2128 { 2129 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2130 if (dst.getMode() == Address::literal) { 2131 
assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2132 lea(rscratch2, dst); 2133 dst = Address(rscratch2); 2134 } 2135 ldr(rscratch1, dst); 2136 decrement(rscratch1, value); 2137 str(rscratch1, dst); 2138 } 2139 2140 void MacroAssembler::incrementw(Register reg, int value) 2141 { 2142 if (value < 0) { decrementw(reg, -value); return; } 2143 if (value == 0) { return; } 2144 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2145 /* else */ { 2146 assert(reg != rscratch2, "invalid dst for register increment"); 2147 movw(rscratch2, (unsigned)value); 2148 addw(reg, reg, rscratch2); 2149 } 2150 } 2151 2152 void MacroAssembler::increment(Register reg, int value) 2153 { 2154 if (value < 0) { decrement(reg, -value); return; } 2155 if (value == 0) { return; } 2156 if (value < (1 << 12)) { add(reg, reg, value); return; } 2157 /* else */ { 2158 assert(reg != rscratch2, "invalid dst for register increment"); 2159 movw(rscratch2, (unsigned)value); 2160 add(reg, reg, rscratch2); 2161 } 2162 } 2163 2164 void MacroAssembler::incrementw(Address dst, int value) 2165 { 2166 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2167 if (dst.getMode() == Address::literal) { 2168 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2169 lea(rscratch2, dst); 2170 dst = Address(rscratch2); 2171 } 2172 ldrw(rscratch1, dst); 2173 incrementw(rscratch1, value); 2174 strw(rscratch1, dst); 2175 } 2176 2177 void MacroAssembler::increment(Address dst, int value) 2178 { 2179 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2180 if (dst.getMode() == Address::literal) { 2181 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2182 lea(rscratch2, dst); 2183 dst = Address(rscratch2); 2184 } 2185 ldr(rscratch1, dst); 2186 increment(rscratch1, value); 2187 str(rscratch1, dst); 2188 } 2189 2190 // Push lots of registers in the bit set supplied. Don't push sp. 
2191 // Return the number of words pushed 2192 int MacroAssembler::push(unsigned int bitset, Register stack) { 2193 int words_pushed = 0; 2194 2195 // Scan bitset to accumulate register pairs 2196 unsigned char regs[32]; 2197 int count = 0; 2198 for (int reg = 0; reg <= 30; reg++) { 2199 if (1 & bitset) 2200 regs[count++] = reg; 2201 bitset >>= 1; 2202 } 2203 regs[count++] = zr->raw_encoding(); 2204 count &= ~1; // Only push an even number of regs 2205 2206 if (count) { 2207 stp(as_Register(regs[0]), as_Register(regs[1]), 2208 Address(pre(stack, -count * wordSize))); 2209 words_pushed += 2; 2210 } 2211 for (int i = 2; i < count; i += 2) { 2212 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2213 Address(stack, i * wordSize)); 2214 words_pushed += 2; 2215 } 2216 2217 assert(words_pushed == count, "oops, pushed != count"); 2218 2219 return count; 2220 } 2221 2222 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2223 int words_pushed = 0; 2224 2225 // Scan bitset to accumulate register pairs 2226 unsigned char regs[32]; 2227 int count = 0; 2228 for (int reg = 0; reg <= 30; reg++) { 2229 if (1 & bitset) 2230 regs[count++] = reg; 2231 bitset >>= 1; 2232 } 2233 regs[count++] = zr->raw_encoding(); 2234 count &= ~1; 2235 2236 for (int i = 2; i < count; i += 2) { 2237 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2238 Address(stack, i * wordSize)); 2239 words_pushed += 2; 2240 } 2241 if (count) { 2242 ldp(as_Register(regs[0]), as_Register(regs[1]), 2243 Address(post(stack, count * wordSize))); 2244 words_pushed += 2; 2245 } 2246 2247 assert(words_pushed == count, "oops, pushed != count"); 2248 2249 return count; 2250 } 2251 2252 // Push lots of registers in the bit set supplied. Don't push sp. 2253 // Return the number of dwords pushed 2254 int MacroAssembler::push_fp(unsigned int bitset, Register stack) { 2255 int words_pushed = 0; 2256 bool use_sve = false; 2257 int sve_vector_size_in_bytes = 0; 2258 2259 #ifdef COMPILER2 2260 use_sve = Matcher::supports_scalable_vector(); 2261 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2262 #endif 2263 2264 // Scan bitset to accumulate register pairs 2265 unsigned char regs[32]; 2266 int count = 0; 2267 for (int reg = 0; reg <= 31; reg++) { 2268 if (1 & bitset) 2269 regs[count++] = reg; 2270 bitset >>= 1; 2271 } 2272 2273 if (count == 0) { 2274 return 0; 2275 } 2276 2277 // SVE 2278 if (use_sve && sve_vector_size_in_bytes > 16) { 2279 sub(stack, stack, sve_vector_size_in_bytes * count); 2280 for (int i = 0; i < count; i++) { 2281 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2282 } 2283 return count * sve_vector_size_in_bytes / 8; 2284 } 2285 2286 // NEON 2287 if (count == 1) { 2288 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2289 return 2; 2290 } 2291 2292 bool odd = (count & 1) == 1; 2293 int push_slots = count + (odd ? 1 : 0); 2294 2295 // Always pushing full 128 bit registers. 
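// Layout sketch, assuming for illustration regs = {v0, v1, v2}
// (count == 3, odd):
//   stpq q0, q1, [sp, #-64]!  // pre-decrement by push_slots * 16 bytes
//   strq q2, [sp, #32]        // the odd register gets its own 16-byte slot
// which leaves [sp, #48] as padding so sp stays 16-byte aligned.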
2296 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2297 words_pushed += 2; 2298 2299 for (int i = 2; i + 1 < count; i += 2) { 2300 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2301 words_pushed += 2; 2302 } 2303 2304 if (odd) { 2305 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2306 words_pushed++; 2307 } 2308 2309 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2310 return count * 2; 2311 } 2312 2313 // Return the number of dwords popped 2314 int MacroAssembler::pop_fp(unsigned int bitset, Register stack) { 2315 int words_pushed = 0; 2316 bool use_sve = false; 2317 int sve_vector_size_in_bytes = 0; 2318 2319 #ifdef COMPILER2 2320 use_sve = Matcher::supports_scalable_vector(); 2321 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2322 #endif 2323 // Scan bitset to accumulate register pairs 2324 unsigned char regs[32]; 2325 int count = 0; 2326 for (int reg = 0; reg <= 31; reg++) { 2327 if (1 & bitset) 2328 regs[count++] = reg; 2329 bitset >>= 1; 2330 } 2331 2332 if (count == 0) { 2333 return 0; 2334 } 2335 2336 // SVE 2337 if (use_sve && sve_vector_size_in_bytes > 16) { 2338 for (int i = count - 1; i >= 0; i--) { 2339 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2340 } 2341 add(stack, stack, sve_vector_size_in_bytes * count); 2342 return count * sve_vector_size_in_bytes / 8; 2343 } 2344 2345 // NEON 2346 if (count == 1) { 2347 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2348 return 2; 2349 } 2350 2351 bool odd = (count & 1) == 1; 2352 int push_slots = count + (odd ? 1 : 0); 2353 2354 if (odd) { 2355 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2356 words_pushed++; 2357 } 2358 2359 for (int i = 2; i + 1 < count; i += 2) { 2360 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2361 words_pushed += 2; 2362 } 2363 2364 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2365 words_pushed += 2; 2366 2367 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2368 2369 return count * 2; 2370 } 2371 2372 // Return the number of dwords pushed 2373 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2374 bool use_sve = false; 2375 int sve_predicate_size_in_slots = 0; 2376 2377 #ifdef COMPILER2 2378 use_sve = Matcher::supports_scalable_vector(); 2379 if (use_sve) { 2380 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2381 } 2382 #endif 2383 2384 if (!use_sve) { 2385 return 0; 2386 } 2387 2388 unsigned char regs[PRegister::number_of_registers]; 2389 int count = 0; 2390 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2391 if (1 & bitset) 2392 regs[count++] = reg; 2393 bitset >>= 1; 2394 } 2395 2396 if (count == 0) { 2397 return 0; 2398 } 2399 2400 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2401 VMRegImpl::stack_slot_size * count, 16); 2402 sub(stack, stack, total_push_bytes); 2403 for (int i = 0; i < count; i++) { 2404 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2405 } 2406 return total_push_bytes / 8; 2407 } 2408 2409 // Return the number of dwords popped 2410 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2411 bool use_sve = false; 2412 int sve_predicate_size_in_slots = 0; 2413 2414 #ifdef 
COMPILER2 2415 use_sve = Matcher::supports_scalable_vector(); 2416 if (use_sve) { 2417 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2418 } 2419 #endif 2420 2421 if (!use_sve) { 2422 return 0; 2423 } 2424 2425 unsigned char regs[PRegister::number_of_registers]; 2426 int count = 0; 2427 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2428 if (1 & bitset) 2429 regs[count++] = reg; 2430 bitset >>= 1; 2431 } 2432 2433 if (count == 0) { 2434 return 0; 2435 } 2436 2437 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2438 VMRegImpl::stack_slot_size * count, 16); 2439 for (int i = count - 1; i >= 0; i--) { 2440 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2441 } 2442 add(stack, stack, total_pop_bytes); 2443 return total_pop_bytes / 8; 2444 } 2445 2446 #ifdef ASSERT 2447 void MacroAssembler::verify_heapbase(const char* msg) { 2448 #if 0 2449 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2450 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2451 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2452 // rheapbase is allocated as general register 2453 return; 2454 } 2455 if (CheckCompressedOops) { 2456 Label ok; 2457 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2458 cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2459 br(Assembler::EQ, ok); 2460 stop(msg); 2461 bind(ok); 2462 pop(1 << rscratch1->encoding(), sp); 2463 } 2464 #endif 2465 } 2466 #endif 2467 2468 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 2469 assert_different_registers(value, tmp1, tmp2); 2470 Label done, tagged, weak_tagged; 2471 2472 cbz(value, done); // Use null as-is. 2473 tst(value, JNIHandles::tag_mask); // Test for tag. 2474 br(Assembler::NE, tagged); 2475 2476 // Resolve local handle 2477 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 2478 verify_oop(value); 2479 b(done); 2480 2481 bind(tagged); 2482 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 2483 tbnz(value, 0, weak_tagged); // Test for weak tag. 2484 2485 // Resolve global handle 2486 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 2487 verify_oop(value); 2488 b(done); 2489 2490 bind(weak_tagged); 2491 // Resolve jweak. 2492 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 2493 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 2494 verify_oop(value); 2495 2496 bind(done); 2497 } 2498 2499 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 2500 assert_different_registers(value, tmp1, tmp2); 2501 Label done; 2502 2503 cbz(value, done); // Use null as-is. 
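// Tag scheme, cf. JNIHandles::TypeTag and resolve_jobject above: local
// handles are untagged, global handles carry 0b10 and weak globals 0b1
// in the low bits, so the load below subtracts the tag back out of the
// handle to recover the underlying address.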
2504 2505 #ifdef ASSERT 2506 { 2507 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 2508 Label valid_global_tag; 2509 tbnz(value, 1, valid_global_tag); // Test for global tag 2510 stop("non global jobject using resolve_global_jobject"); 2511 bind(valid_global_tag); 2512 } 2513 #endif 2514 2515 // Resolve global handle 2516 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 2517 verify_oop(value); 2518 2519 bind(done); 2520 } 2521 2522 void MacroAssembler::stop(const char* msg) { 2523 BLOCK_COMMENT(msg); 2524 dcps1(0xdeae); 2525 emit_int64((uintptr_t)msg); 2526 } 2527 2528 void MacroAssembler::unimplemented(const char* what) { 2529 const char* buf = nullptr; 2530 { 2531 ResourceMark rm; 2532 stringStream ss; 2533 ss.print("unimplemented: %s", what); 2534 buf = code_string(ss.as_string()); 2535 } 2536 stop(buf); 2537 } 2538 2539 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 2540 #ifdef ASSERT 2541 Label OK; 2542 br(cc, OK); 2543 stop(msg); 2544 bind(OK); 2545 #endif 2546 } 2547 2548 // If a constant does not fit in an immediate field, generate some 2549 // number of MOV instructions and then perform the operation. 2550 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 2551 add_sub_imm_insn insn1, 2552 add_sub_reg_insn insn2, 2553 bool is32) { 2554 assert(Rd != zr, "Rd = zr and not setting flags?"); 2555 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 2556 if (fits) { 2557 (this->*insn1)(Rd, Rn, imm); 2558 } else { 2559 if (uabs(imm) < (1 << 24)) { 2560 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 2561 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 2562 } else { 2563 assert_different_registers(Rd, Rn); 2564 mov(Rd, imm); 2565 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 2566 } 2567 } 2568 } 2569 2570 // Separate vsn which sets the flags. Optimisations are more restricted 2571 // because we must set the flags correctly. 2572 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 2573 add_sub_imm_insn insn1, 2574 add_sub_reg_insn insn2, 2575 bool is32) { 2576 bool fits = operand_valid_for_add_sub_immediate(is32 ? 
(int32_t)imm : imm); 2577 if (fits) { 2578 (this->*insn1)(Rd, Rn, imm); 2579 } else { 2580 assert_different_registers(Rd, Rn); 2581 assert(Rd != zr, "overflow in immediate operand"); 2582 mov(Rd, imm); 2583 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 2584 } 2585 } 2586 2587 2588 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 2589 if (increment.is_register()) { 2590 add(Rd, Rn, increment.as_register()); 2591 } else { 2592 add(Rd, Rn, increment.as_constant()); 2593 } 2594 } 2595 2596 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 2597 if (increment.is_register()) { 2598 addw(Rd, Rn, increment.as_register()); 2599 } else { 2600 addw(Rd, Rn, increment.as_constant()); 2601 } 2602 } 2603 2604 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 2605 if (decrement.is_register()) { 2606 sub(Rd, Rn, decrement.as_register()); 2607 } else { 2608 sub(Rd, Rn, decrement.as_constant()); 2609 } 2610 } 2611 2612 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 2613 if (decrement.is_register()) { 2614 subw(Rd, Rn, decrement.as_register()); 2615 } else { 2616 subw(Rd, Rn, decrement.as_constant()); 2617 } 2618 } 2619 2620 void MacroAssembler::reinit_heapbase() 2621 { 2622 if (UseCompressedOops) { 2623 if (Universe::is_fully_initialized()) { 2624 mov(rheapbase, CompressedOops::ptrs_base()); 2625 } else { 2626 lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2627 ldr(rheapbase, Address(rheapbase)); 2628 } 2629 } 2630 } 2631 2632 // this simulates the behaviour of the x86 cmpxchg instruction using a 2633 // load linked/store conditional pair. we use the acquire/release 2634 // versions of these instructions so that we flush pending writes as 2635 // per Java semantics. 2636 2637 // n.b the x86 version assumes the old value to be compared against is 2638 // in rax and updates rax with the value located in memory if the 2639 // cmpxchg fails. we supply a register for the old value explicitly 2640 2641 // the aarch64 load linked/store conditional instructions do not 2642 // accept an offset. so, unlike x86, we must provide a plain register 2643 // to identify the memory word to be compared/exchanged rather than a 2644 // register+offset Address. 2645 2646 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 2647 Label &succeed, Label *fail) { 2648 // oldv holds comparison value 2649 // newv holds value to write in exchange 2650 // addr identifies memory word to compare against/update 2651 if (UseLSE) { 2652 mov(tmp, oldv); 2653 casal(Assembler::xword, oldv, newv, addr); 2654 cmp(tmp, oldv); 2655 br(Assembler::EQ, succeed); 2656 membar(AnyAny); 2657 } else { 2658 Label retry_load, nope; 2659 prfm(Address(addr), PSTL1STRM); 2660 bind(retry_load); 2661 // flush and load exclusive from the memory location 2662 // and fail if it is not what we expect 2663 ldaxr(tmp, addr); 2664 cmp(tmp, oldv); 2665 br(Assembler::NE, nope); 2666 // if we store+flush with no intervening write tmp will be zero 2667 stlxr(tmp, newv, addr); 2668 cbzw(tmp, succeed); 2669 // retry so we only ever return after a load fails to compare 2670 // ensures we don't return a stale value after a failed write. 
2671     b(retry_load);
2672     // if the memory word differs we return it in oldv and signal a fail
2673     bind(nope);
2674     membar(AnyAny);
2675     mov(oldv, tmp);
2676   }
2677   if (fail)
2678     b(*fail);
2679 }
2680
2681 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
2682                                         Label &succeed, Label *fail) {
2683   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
2684   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
2685 }
2686
2687 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
2688                               Label &succeed, Label *fail) {
2689   // oldv holds comparison value
2690   // newv holds value to write in exchange
2691   // addr identifies memory word to compare against/update
2692   // tmp returns 0/1 for success/failure
2693   if (UseLSE) {
2694     mov(tmp, oldv);
2695     casal(Assembler::word, oldv, newv, addr);
2696     cmp(tmp, oldv);
2697     br(Assembler::EQ, succeed);
2698     membar(AnyAny);
2699   } else {
2700     Label retry_load, nope;
2701     prfm(Address(addr), PSTL1STRM);
2702     bind(retry_load);
2703     // flush and load exclusive from the memory location
2704     // and fail if it is not what we expect
2705     ldaxrw(tmp, addr);
2706     cmp(tmp, oldv);
2707     br(Assembler::NE, nope);
2708     // if we store+flush with no intervening write tmp will be zero
2709     stlxrw(tmp, newv, addr);
2710     cbzw(tmp, succeed);
2711     // retry so we only ever return after a load fails to compare
2712     // ensures we don't return a stale value after a failed write.
2713     b(retry_load);
2714     // if the memory word differs we return it in oldv and signal a fail
2715     bind(nope);
2716     membar(AnyAny);
2717     mov(oldv, tmp);
2718   }
2719   if (fail)
2720     b(*fail);
2721 }
2722
2723 // A generic CAS; success or failure is in the EQ flag. A weak CAS
2724 // doesn't retry and may fail spuriously. If the oldval is wanted,
2725 // pass a register for the result, otherwise pass noreg.
2726
2727 // Clobbers rscratch1
2728 void MacroAssembler::cmpxchg(Register addr, Register expected,
2729                              Register new_val,
2730                              enum operand_size size,
2731                              bool acquire, bool release,
2732                              bool weak,
2733                              Register result) {
2734   if (result == noreg) result = rscratch1;
2735   BLOCK_COMMENT("cmpxchg {");
2736   if (UseLSE) {
2737     mov(result, expected);
2738     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
2739     compare_eq(result, expected, size);
2740 #ifdef ASSERT
2741     // Poison rscratch1 which is written on !UseLSE branch
2742     mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
2743 #endif
2744   } else {
2745     Label retry_load, done;
2746     prfm(Address(addr), PSTL1STRM);
2747     bind(retry_load);
2748     load_exclusive(result, addr, size, acquire);
2749     compare_eq(result, expected, size);
2750     br(Assembler::NE, done);
2751     store_exclusive(rscratch1, new_val, addr, size, release);
2752     if (weak) {
2753       cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
2754     } else {
2755       cbnzw(rscratch1, retry_load);
2756     }
2757     bind(done);
2758   }
2759   BLOCK_COMMENT("} cmpxchg");
2760 }
2761
2762 // A generic comparison. Only compares for equality, clobbers rscratch1.
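// AArch64 has no 16- or 8-bit register compare, so for those sizes the
// equality test is synthesized roughly as:
//   eorw rscratch1, rm, rn       // compute the differing bits
//   ands zr, rscratch1, #0xffff  // EQ iff the low 16 (or 8) bits match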
2763 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 2764 if (size == xword) { 2765 cmp(rm, rn); 2766 } else if (size == word) { 2767 cmpw(rm, rn); 2768 } else if (size == halfword) { 2769 eorw(rscratch1, rm, rn); 2770 ands(zr, rscratch1, 0xffff); 2771 } else if (size == byte) { 2772 eorw(rscratch1, rm, rn); 2773 ands(zr, rscratch1, 0xff); 2774 } else { 2775 ShouldNotReachHere(); 2776 } 2777 } 2778 2779 2780 static bool different(Register a, RegisterOrConstant b, Register c) { 2781 if (b.is_constant()) 2782 return a != c; 2783 else 2784 return a != b.as_register() && a != c && b.as_register() != c; 2785 } 2786 2787 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 2788 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 2789 if (UseLSE) { \ 2790 prev = prev->is_valid() ? prev : zr; \ 2791 if (incr.is_register()) { \ 2792 AOP(sz, incr.as_register(), prev, addr); \ 2793 } else { \ 2794 mov(rscratch2, incr.as_constant()); \ 2795 AOP(sz, rscratch2, prev, addr); \ 2796 } \ 2797 return; \ 2798 } \ 2799 Register result = rscratch2; \ 2800 if (prev->is_valid()) \ 2801 result = different(prev, incr, addr) ? prev : rscratch2; \ 2802 \ 2803 Label retry_load; \ 2804 prfm(Address(addr), PSTL1STRM); \ 2805 bind(retry_load); \ 2806 LDXR(result, addr); \ 2807 OP(rscratch1, result, incr); \ 2808 STXR(rscratch2, rscratch1, addr); \ 2809 cbnzw(rscratch2, retry_load); \ 2810 if (prev->is_valid() && prev != result) { \ 2811 IOP(prev, rscratch1, incr); \ 2812 } \ 2813 } 2814 2815 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 2816 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 2817 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 2818 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 2819 2820 #undef ATOMIC_OP 2821 2822 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 2823 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 2824 if (UseLSE) { \ 2825 prev = prev->is_valid() ? prev : zr; \ 2826 AOP(sz, newv, prev, addr); \ 2827 return; \ 2828 } \ 2829 Register result = rscratch2; \ 2830 if (prev->is_valid()) \ 2831 result = different(prev, newv, addr) ? 
prev : rscratch2; \
2832 \
2833   Label retry_load; \
2834   prfm(Address(addr), PSTL1STRM); \
2835   bind(retry_load); \
2836   LDXR(result, addr); \
2837   STXR(rscratch1, newv, addr); \
2838   cbnzw(rscratch1, retry_load); \
2839   if (prev->is_valid() && prev != result) \
2840     mov(prev, result); \
2841 }
2842
2843 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
2844 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
2845 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
2846 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
2847 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
2848 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
2849
2850 #undef ATOMIC_XCHG
2851
2852 #ifndef PRODUCT
2853 extern "C" void findpc(intptr_t x);
2854 #endif
2855
2856 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
2857 {
2858   // In order to get locks to work, we need to fake an in_VM state
2859   if (ShowMessageBoxOnError) {
2860     JavaThread* thread = JavaThread::current();
2861     JavaThreadState saved_state = thread->thread_state();
2862     thread->set_thread_state(_thread_in_vm);
2863 #ifndef PRODUCT
2864     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2865       ttyLocker ttyl;
2866       BytecodeCounter::print();
2867     }
2868 #endif
2869     if (os::message_box(msg, "Execution stopped, print registers?")) {
2870       ttyLocker ttyl;
2871       tty->print_cr(" pc = 0x%016" PRIx64, pc);
2872 #ifndef PRODUCT
2873       tty->cr();
2874       findpc(pc);
2875       tty->cr();
2876 #endif
2877       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
2878       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
2879       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
2880       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
2881       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
2882       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
2883       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
2884       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
2885       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
2886       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
2887       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
2888       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
2889       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
2890       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
2891       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
2892       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
2893       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
2894       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
2895       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
2896       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
2897       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
2898       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
2899       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
2900       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
2901       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
2902       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
2903       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
2904       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
2905       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
2906       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
2907       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
2908       BREAKPOINT;
2909     }
2910   }
2911   fatal("DEBUG MESSAGE: %s", msg);
2912 }
2913
2914 RegSet MacroAssembler::call_clobbered_gp_registers() {
2915   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
2916 #ifndef R18_RESERVED
2917   regs += r18_tls;
2918 #endif
2919   return regs;
2920 }
2921
2922 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
2923   int step = 4 *
wordSize; 2924 push(call_clobbered_gp_registers() - exclude, sp); 2925 sub(sp, sp, step); 2926 mov(rscratch1, -step); 2927 // Push v0-v7, v16-v31. 2928 for (int i = 31; i>= 4; i -= 4) { 2929 if (i <= v7->encoding() || i >= v16->encoding()) 2930 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 2931 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 2932 } 2933 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 2934 as_FloatRegister(3), T1D, Address(sp)); 2935 } 2936 2937 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 2938 for (int i = 0; i < 32; i += 4) { 2939 if (i <= v7->encoding() || i >= v16->encoding()) 2940 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 2941 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 2942 } 2943 2944 reinitialize_ptrue(); 2945 2946 pop(call_clobbered_gp_registers() - exclude, sp); 2947 } 2948 2949 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 2950 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 2951 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 2952 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 2953 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 2954 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 2955 sve_str(as_FloatRegister(i), Address(sp, i)); 2956 } 2957 } else { 2958 int step = (save_vectors ? 8 : 4) * wordSize; 2959 mov(rscratch1, -step); 2960 sub(sp, sp, step); 2961 for (int i = 28; i >= 4; i -= 4) { 2962 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 2963 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 2964 } 2965 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 2966 } 2967 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 2968 sub(sp, sp, total_predicate_in_bytes); 2969 for (int i = 0; i < PRegister::number_of_registers; i++) { 2970 sve_str(as_PRegister(i), Address(sp, i)); 2971 } 2972 } 2973 } 2974 2975 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 2976 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 2977 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 2978 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 2979 sve_ldr(as_PRegister(i), Address(sp, i)); 2980 } 2981 add(sp, sp, total_predicate_in_bytes); 2982 } 2983 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 2984 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 2985 sve_ldr(as_FloatRegister(i), Address(sp, i)); 2986 } 2987 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 2988 } else { 2989 int step = (restore_vectors ? 8 : 4) * wordSize; 2990 for (int i = 0; i <= 28; i += 4) 2991 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 2992 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 2993 } 2994 2995 // We may use predicate registers and rely on ptrue with SVE, 2996 // regardless of wide vector (> 8 bytes) used or not. 2997 if (use_sve) { 2998 reinitialize_ptrue(); 2999 } 3000 3001 // integer registers except lr & sp 3002 pop(RegSet::range(r0, r17), sp); 3003 #ifdef R18_RESERVED 3004 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3005 pop(RegSet::range(r20, r29), sp); 3006 #else 3007 pop(RegSet::range(r18_tls, r29), sp); 3008 #endif 3009 } 3010 3011 /** 3012 * Helpers for multiply_to_len(). 
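 *
 * For instance, add2_with_carry below computes, in effect, the 128-bit sum
 *   final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2
 * folding the carry of each 64-bit addition into the high word via adc.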
3013  */
3014 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3015                                      Register src1, Register src2) {
3016   adds(dest_lo, dest_lo, src1);
3017   adc(dest_hi, dest_hi, zr);
3018   adds(dest_lo, dest_lo, src2);
3019   adc(final_dest_hi, dest_hi, zr);
3020 }
3021
3022 // Generate an address from (r + r1 extend offset). "size" is the
3023 // size of the operand. The result may be in rscratch2.
3024 Address MacroAssembler::offsetted_address(Register r, Register r1,
3025                                           Address::extend ext, int offset, int size) {
3026   if (offset || (ext.shift() % size != 0)) {
3027     lea(rscratch2, Address(r, r1, ext));
3028     return Address(rscratch2, offset);
3029   } else {
3030     return Address(r, r1, ext);
3031   }
3032 }
3033
3034 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3035 {
3036   assert(offset >= 0, "spill to negative address?");
3037   // Is the offset reachable?
3038   // Not aligned - 9 bits signed offset
3039   // Aligned - 12 bits unsigned offset shifted
3040   Register base = sp;
3041   if ((offset & (size-1)) && offset >= (1<<8)) {
3042     add(tmp, base, offset & ((1<<12)-1));
3043     base = tmp;
3044     offset &= -1u<<12;
3045   }
3046
3047   if (offset >= (1<<12) * size) {
3048     add(tmp, base, offset & (((1<<12)-1)<<12));
3049     base = tmp;
3050     offset &= ~(((1<<12)-1)<<12);
3051   }
3052
3053   return Address(base, offset);
3054 }
3055
3056 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3057   assert(offset >= 0, "spill to negative address?");
3058
3059   Register base = sp;
3060
3061   // An immediate offset in the range 0 to 255 which is multiplied
3062   // by the current vector or predicate register size in bytes.
3063   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3064     return Address(base, offset / sve_reg_size_in_bytes);
3065   }
3066
3067   add(tmp, base, offset);
3068   return Address(tmp);
3069 }
3070
3071 // Checks whether the offset is aligned.
3072 // Returns true if it is, else false.
3073 bool MacroAssembler::merge_alignment_check(Register base,
3074                                            size_t size,
3075                                            int64_t cur_offset,
3076                                            int64_t prev_offset) const {
3077   if (AvoidUnalignedAccesses) {
3078     if (base == sp) {
3079       // Checks whether the low offset is aligned to a pair of registers.
3080       int64_t pair_mask = size * 2 - 1;
3081       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3082       return (offset & pair_mask) == 0;
3083     } else { // If base is not sp, we can't guarantee the access is aligned.
3084       return false;
3085     }
3086   } else {
3087     int64_t mask = size - 1;
3088     // Load/store pair instructions only support element-size-aligned offsets.
3089     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3090   }
3091 }
3092
3093 // Checks whether the current and previous loads/stores can be merged.
3094 // Returns true if they can be merged, else false.
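// For example, ldr x1, [x5, #8] followed by ldr x2, [x5, #16] is a
// merge candidate (same base and element size, offsets exactly one
// element apart), whereas ldr x2, [x2, #8]; ldr x3, [x2, #16] is not,
// because the first load clobbers the base register of the second.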
3095 bool MacroAssembler::ldst_can_merge(Register rt,
3096                                     const Address &adr,
3097                                     size_t cur_size_in_bytes,
3098                                     bool is_store) const {
3099   address prev = pc() - NativeInstruction::instruction_size;
3100   address last = code()->last_insn();
3101
3102   if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3103     return false;
3104   }
3105
3106   if (adr.getMode() != Address::base_plus_offset || prev != last) {
3107     return false;
3108   }
3109
3110   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3111   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3112
3113   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3114   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3115
3116   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3117     return false;
3118   }
3119
3120   int64_t max_offset = 63 * prev_size_in_bytes;
3121   int64_t min_offset = -64 * prev_size_in_bytes;
3122
3123   assert(prev_ldst->is_not_pre_post_index(), "merging of pre-index or post-index accesses is not supported.");
3124
3125   // Only accesses with the same base register can be merged.
3126   if (adr.base() != prev_ldst->base()) {
3127     return false;
3128   }
3129
3130   int64_t cur_offset = adr.offset();
3131   int64_t prev_offset = prev_ldst->offset();
3132   size_t diff = abs(cur_offset - prev_offset);
3133   if (diff != prev_size_in_bytes) {
3134     return false;
3135   }
3136
3137   // The following cases cannot be merged:
3138   //   ldr x2, [x2, #8]
3139   //   ldr x3, [x2, #16]
3140   // or:
3141   //   ldr x2, [x3, #8]
3142   //   ldr x2, [x3, #16]
3143   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3144   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3145     return false;
3146   }
3147
3148   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3149   // The offset must be within the ldp/stp instruction's range.
3150   if (low_offset > max_offset || low_offset < min_offset) {
3151     return false;
3152   }
3153
3154   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3155     return true;
3156   }
3157
3158   return false;
3159 }
3160
3161 // Merge the current load/store with the previous load/store into an ldp/stp.
3162 void MacroAssembler::merge_ldst(Register rt,
3163                                 const Address &adr,
3164                                 size_t cur_size_in_bytes,
3165                                 bool is_store) {
3166
3167   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3168
3169   Register rt_low, rt_high;
3170   address prev = pc() - NativeInstruction::instruction_size;
3171   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3172
3173   int64_t offset;
3174
3175   if (adr.offset() < prev_ldst->offset()) {
3176     offset = adr.offset();
3177     rt_low = rt;
3178     rt_high = prev_ldst->target();
3179   } else {
3180     offset = prev_ldst->offset();
3181     rt_low = prev_ldst->target();
3182     rt_high = rt;
3183   }
3184
3185   Address adr_p = Address(prev_ldst->base(), offset);
3186   // Overwrite the previously generated instruction.
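// In outline: the code section's end pointer is rewound by one
// instruction so that the ldp/stp emitted below lands where the
// previous ldr/str was; the current access is then never emitted as a
// separate instruction.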
3187   code_section()->set_end(prev);
3188
3189   const size_t sz = prev_ldst->size_in_bytes();
3190   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3191   if (!is_store) {
3192     BLOCK_COMMENT("merged ldr pair");
3193     if (sz == 8) {
3194       ldp(rt_low, rt_high, adr_p);
3195     } else {
3196       ldpw(rt_low, rt_high, adr_p);
3197     }
3198   } else {
3199     BLOCK_COMMENT("merged str pair");
3200     if (sz == 8) {
3201       stp(rt_low, rt_high, adr_p);
3202     } else {
3203       stpw(rt_low, rt_high, adr_p);
3204     }
3205   }
3206 }
3207
3208 /**
3209  * Multiply 64 bit by 64 bit first loop.
3210  */
3211 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3212                                            Register y, Register y_idx, Register z,
3213                                            Register carry, Register product,
3214                                            Register idx, Register kdx) {
3215   //
3216   //  jlong carry, x[], y[], z[];
3217   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3218   //    huge_128 product = y[idx] * x[xstart] + carry;
3219   //    z[kdx] = (jlong)product;
3220   //    carry = (jlong)(product >>> 64);
3221   //  }
3222   //  z[xstart] = carry;
3223   //
3224
3225   Label L_first_loop, L_first_loop_exit;
3226   Label L_one_x, L_one_y, L_multiply;
3227
3228   subsw(xstart, xstart, 1);
3229   br(Assembler::MI, L_one_x);
3230
3231   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3232   ldr(x_xstart, Address(rscratch1));
3233   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3234
3235   bind(L_first_loop);
3236   subsw(idx, idx, 1);
3237   br(Assembler::MI, L_first_loop_exit);
3238   subsw(idx, idx, 1);
3239   br(Assembler::MI, L_one_y);
3240   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3241   ldr(y_idx, Address(rscratch1));
3242   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3243   bind(L_multiply);
3244
3245   // AArch64 has a multiply-accumulate instruction that we can't use
3246   // here because it has no way to process carries, so we have to use
3247   // separate add and adc instructions. Bah.
3248   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3249   mul(product, x_xstart, y_idx);
3250   adds(product, product, carry);
3251   adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
3252
3253   subw(kdx, kdx, 2);
3254   ror(product, product, 32); // back to big-endian
3255   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3256
3257   b(L_first_loop);
3258
3259   bind(L_one_y);
3260   ldrw(y_idx, Address(y, 0));
3261   b(L_multiply);
3262
3263   bind(L_one_x);
3264   ldrw(x_xstart, Address(x, 0));
3265   b(L_first_loop);
3266
3267   bind(L_first_loop_exit);
3268 }
3269
3270 /**
3271  * Multiply 128 bit by 128 bit. Unrolled inner loop.
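 *
 * For orientation, a summary of the loop below: each iteration consumes
 * four 32-bit digits of y, loaded as two 64-bit words, using ror by 32
 * to translate between BigInteger's big-endian int[] digit order and
 * little-endian 64-bit arithmetic.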
3272 * 3273 */ 3274 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3275 Register carry, Register carry2, 3276 Register idx, Register jdx, 3277 Register yz_idx1, Register yz_idx2, 3278 Register tmp, Register tmp3, Register tmp4, 3279 Register tmp6, Register product_hi) { 3280 3281 // jlong carry, x[], y[], z[]; 3282 // int kdx = ystart+1; 3283 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3284 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3285 // jlong carry2 = (jlong)(tmp3 >>> 64); 3286 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3287 // carry = (jlong)(tmp4 >>> 64); 3288 // z[kdx+idx+1] = (jlong)tmp3; 3289 // z[kdx+idx] = (jlong)tmp4; 3290 // } 3291 // idx += 2; 3292 // if (idx > 0) { 3293 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3294 // z[kdx+idx] = (jlong)yz_idx1; 3295 // carry = (jlong)(yz_idx1 >>> 64); 3296 // } 3297 // 3298 3299 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3300 3301 lsrw(jdx, idx, 2); 3302 3303 bind(L_third_loop); 3304 3305 subsw(jdx, jdx, 1); 3306 br(Assembler::MI, L_third_loop_exit); 3307 subw(idx, idx, 4); 3308 3309 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3310 3311 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3312 3313 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3314 3315 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3316 ror(yz_idx2, yz_idx2, 32); 3317 3318 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3319 3320 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3321 umulh(tmp4, product_hi, yz_idx1); 3322 3323 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3324 ror(rscratch2, rscratch2, 32); 3325 3326 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3327 umulh(carry2, product_hi, yz_idx2); 3328 3329 // propagate sum of both multiplications into carry:tmp4:tmp3 3330 adds(tmp3, tmp3, carry); 3331 adc(tmp4, tmp4, zr); 3332 adds(tmp3, tmp3, rscratch1); 3333 adcs(tmp4, tmp4, tmp); 3334 adc(carry, carry2, zr); 3335 adds(tmp4, tmp4, rscratch2); 3336 adc(carry, carry, zr); 3337 3338 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3339 ror(tmp4, tmp4, 32); 3340 stp(tmp4, tmp3, Address(tmp6, 0)); 3341 3342 b(L_third_loop); 3343 bind (L_third_loop_exit); 3344 3345 andw (idx, idx, 0x3); 3346 cbz(idx, L_post_third_loop_done); 3347 3348 Label L_check_1; 3349 subsw(idx, idx, 2); 3350 br(Assembler::MI, L_check_1); 3351 3352 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3353 ldr(yz_idx1, Address(rscratch1, 0)); 3354 ror(yz_idx1, yz_idx1, 32); 3355 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3356 umulh(tmp4, product_hi, yz_idx1); 3357 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3358 ldr(yz_idx2, Address(rscratch1, 0)); 3359 ror(yz_idx2, yz_idx2, 32); 3360 3361 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3362 3363 ror(tmp3, tmp3, 32); 3364 str(tmp3, Address(rscratch1, 0)); 3365 3366 bind (L_check_1); 3367 3368 andw (idx, idx, 0x1); 3369 subsw(idx, idx, 1); 3370 br(Assembler::MI, L_post_third_loop_done); 3371 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3372 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3373 umulh(carry2, tmp4, product_hi); 3374 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3375 3376 add2_with_carry(carry2, tmp3, tmp4, carry); 3377 3378 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3379 
extr(carry, carry2, tmp3, 32); 3380 3381 bind(L_post_third_loop_done); 3382 } 3383 3384 /** 3385 * Code for BigInteger::multiplyToLen() intrinsic. 3386 * 3387 * r0: x 3388 * r1: xlen 3389 * r2: y 3390 * r3: ylen 3391 * r4: z 3392 * r5: zlen 3393 * r10: tmp1 3394 * r11: tmp2 3395 * r12: tmp3 3396 * r13: tmp4 3397 * r14: tmp5 3398 * r15: tmp6 3399 * r16: tmp7 3400 * 3401 */ 3402 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 3403 Register z, Register zlen, 3404 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 3405 Register tmp5, Register tmp6, Register product_hi) { 3406 3407 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6); 3408 3409 const Register idx = tmp1; 3410 const Register kdx = tmp2; 3411 const Register xstart = tmp3; 3412 3413 const Register y_idx = tmp4; 3414 const Register carry = tmp5; 3415 const Register product = xlen; 3416 const Register x_xstart = zlen; // reuse register 3417 3418 // First Loop. 3419 // 3420 // final static long LONG_MASK = 0xffffffffL; 3421 // int xstart = xlen - 1; 3422 // int ystart = ylen - 1; 3423 // long carry = 0; 3424 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 3425 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 3426 // z[kdx] = (int)product; 3427 // carry = product >>> 32; 3428 // } 3429 // z[xstart] = (int)carry; 3430 // 3431 3432 movw(idx, ylen); // idx = ylen; 3433 movw(kdx, zlen); // kdx = xlen+ylen; 3434 mov(carry, zr); // carry = 0; 3435 3436 Label L_done; 3437 3438 movw(xstart, xlen); 3439 subsw(xstart, xstart, 1); 3440 br(Assembler::MI, L_done); 3441 3442 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 3443 3444 Label L_second_loop; 3445 cbzw(kdx, L_second_loop); 3446 3447 Label L_carry; 3448 subw(kdx, kdx, 1); 3449 cbzw(kdx, L_carry); 3450 3451 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3452 lsr(carry, carry, 32); 3453 subw(kdx, kdx, 1); 3454 3455 bind(L_carry); 3456 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3457 3458 // Second and third (nested) loops.
3459 // 3460 // for (int i = xstart-1; i >= 0; i--) { // Second loop 3461 // carry = 0; 3462 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 3463 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 3464 // (z[k] & LONG_MASK) + carry; 3465 // z[k] = (int)product; 3466 // carry = product >>> 32; 3467 // } 3468 // z[i] = (int)carry; 3469 // } 3470 // 3471 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 3472 3473 const Register jdx = tmp1; 3474 3475 bind(L_second_loop); 3476 mov(carry, zr); // carry = 0; 3477 movw(jdx, ylen); // j = ystart+1 3478 3479 subsw(xstart, xstart, 1); // i = xstart-1; 3480 br(Assembler::MI, L_done); 3481 3482 str(z, Address(pre(sp, -4 * wordSize))); 3483 3484 Label L_last_x; 3485 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 3486 subsw(xstart, xstart, 1); // i = xstart-1; 3487 br(Assembler::MI, L_last_x); 3488 3489 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 3490 ldr(product_hi, Address(rscratch1)); 3491 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 3492 3493 Label L_third_loop_prologue; 3494 bind(L_third_loop_prologue); 3495 3496 str(ylen, Address(sp, wordSize)); 3497 stp(x, xstart, Address(sp, 2 * wordSize)); 3498 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 3499 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 3500 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 3501 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 3502 3503 addw(tmp3, xlen, 1); 3504 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 3505 subsw(tmp3, tmp3, 1); 3506 br(Assembler::MI, L_done); 3507 3508 lsr(carry, carry, 32); 3509 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 3510 b(L_second_loop); 3511 3512 // Next infrequent code is moved outside loops. 3513 bind(L_last_x); 3514 ldrw(product_hi, Address(x, 0)); 3515 b(L_third_loop_prologue); 3516 3517 bind(L_done); 3518 } 3519 3520 // Code for BigInteger::mulAdd intrinsic 3521 // out = r0 3522 // in = r1 3523 // offset = r2 (already out.length-offset) 3524 // len = r3 3525 // k = r4 3526 // 3527 // pseudo code from java implementation: 3528 // carry = 0; 3529 // offset = out.length-offset - 1; 3530 // for (int j=len-1; j >= 0; j--) { 3531 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 3532 // out[offset--] = (int)product; 3533 // carry = product >>> 32; 3534 // } 3535 // return (int)carry; 3536 void MacroAssembler::mul_add(Register out, Register in, Register offset, 3537 Register len, Register k) { 3538 Label LOOP, END; 3539 // pre-loop 3540 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 3541 csel(out, zr, out, Assembler::EQ); 3542 br(Assembler::EQ, END); 3543 add(in, in, len, LSL, 2); // in[j+1] address 3544 add(offset, out, offset, LSL, 2); // out[offset + 1] address 3545 mov(out, zr); // used to keep carry now 3546 BIND(LOOP); 3547 ldrw(rscratch1, Address(pre(in, -4))); 3548 madd(rscratch1, rscratch1, k, out); 3549 ldrw(rscratch2, Address(pre(offset, -4))); 3550 add(rscratch1, rscratch1, rscratch2); 3551 strw(rscratch1, Address(offset)); 3552 lsr(out, rscratch1, 32); 3553 subs(len, len, 1); 3554 br(Assembler::NE, LOOP); 3555 BIND(END); 3556 } 3557 3558 /** 3559 * Emits code to update CRC-32 with a byte value according to constants in table 3560 * 3561 * @param [in,out]crc Register containing the crc. 
3562 * @param [in]val Register containing the byte to fold into the CRC. 3563 * @param [in]table Register containing the table of crc constants. 3564 * 3565 * uint32_t crc; 3566 * val = crc_table[(val ^ crc) & 0xFF]; 3567 * crc = val ^ (crc >> 8); 3568 * 3569 */ 3570 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 3571 eor(val, val, crc); 3572 andr(val, val, 0xff); 3573 ldrw(val, Address(table, val, Address::lsl(2))); 3574 eor(crc, val, crc, Assembler::LSR, 8); 3575 } 3576 3577 /** 3578 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 3579 * 3580 * @param [in,out]crc Register containing the crc. 3581 * @param [in]v Register containing the 32-bit value to fold into the CRC. 3582 * @param [in]table0 Register containing table 0 of crc constants. 3583 * @param [in]table1 Register containing table 1 of crc constants. 3584 * @param [in]table2 Register containing table 2 of crc constants. 3585 * @param [in]table3 Register containing table 3 of crc constants. 3586 * 3587 * uint32_t crc; 3588 * v = crc ^ v 3589 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 3590 * 3591 */ 3592 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 3593 Register table0, Register table1, Register table2, Register table3, 3594 bool upper) { 3595 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 3596 uxtb(tmp, v); 3597 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 3598 ubfx(tmp, v, 8, 8); 3599 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 3600 eor(crc, crc, tmp); 3601 ubfx(tmp, v, 16, 8); 3602 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 3603 eor(crc, crc, tmp); 3604 ubfx(tmp, v, 24, 8); 3605 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 3606 eor(crc, crc, tmp); 3607 } 3608 3609 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 3610 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 3611 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 3612 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 3613 3614 subs(tmp0, len, 384); 3615 mvnw(crc, crc); 3616 br(Assembler::GE, CRC_by128_pre); 3617 BIND(CRC_less128); 3618 subs(len, len, 32); 3619 br(Assembler::GE, CRC_by32_loop); 3620 BIND(CRC_less32); 3621 adds(len, len, 32 - 4); 3622 br(Assembler::GE, CRC_by4_loop); 3623 adds(len, len, 4); 3624 br(Assembler::GT, CRC_by1_loop); 3625 b(L_exit); 3626 3627 BIND(CRC_by32_loop); 3628 ldp(tmp0, tmp1, Address(buf)); 3629 crc32x(crc, crc, tmp0); 3630 ldp(tmp2, tmp3, Address(buf, 16)); 3631 crc32x(crc, crc, tmp1); 3632 add(buf, buf, 32); 3633 crc32x(crc, crc, tmp2); 3634 subs(len, len, 32); 3635 crc32x(crc, crc, tmp3); 3636 br(Assembler::GE, CRC_by32_loop); 3637 cmn(len, (u1)32); 3638 br(Assembler::NE, CRC_less32); 3639 b(L_exit); 3640 3641 BIND(CRC_by4_loop); 3642 ldrw(tmp0, Address(post(buf, 4))); 3643 subs(len, len, 4); 3644 crc32w(crc, crc, tmp0); 3645 br(Assembler::GE, CRC_by4_loop); 3646 adds(len, len, 4); 3647 br(Assembler::LE, L_exit); 3648 BIND(CRC_by1_loop); 3649 ldrb(tmp0, Address(post(buf, 1))); 3650 subs(len, len, 1); 3651 crc32b(crc, crc, tmp0); 3652 br(Assembler::GT, CRC_by1_loop); 3653 b(L_exit); 3654 3655 BIND(CRC_by128_pre); 3656 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 3657 4*256*sizeof(juint) + 8*sizeof(juint)); 3658 mov(crc, 0); 3659 crc32x(crc, crc, tmp0); 3660 crc32x(crc, crc, tmp1); 3661 3662 cbnz(len, CRC_less128); 3663 3664 BIND(L_exit);
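// CRC-32 works on a bit-inverted accumulator: the mvnw at function entry
// applied the standard ~0 preconditioning, and the mvnw below removes it
// again. A minimal table-driven sketch of the same convention (illustrative
// only, not the code emitted here):
//
//   uint32_t crc32(uint32_t crc, const uint8_t* p, size_t n) {
//     crc = ~crc;
//     while (n--) crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
//     return ~crc;
//   }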
3665 mvnw(crc, crc); 3666 } 3667 3668 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 3669 Register len, Register tmp0, Register tmp1, Register tmp2, 3670 Register tmp3) { 3671 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 3672 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 3673 3674 mvnw(crc, crc); 3675 3676 subs(len, len, 128); 3677 br(Assembler::GE, CRC_by64_pre); 3678 BIND(CRC_less64); 3679 adds(len, len, 128-32); 3680 br(Assembler::GE, CRC_by32_loop); 3681 BIND(CRC_less32); 3682 adds(len, len, 32-4); 3683 br(Assembler::GE, CRC_by4_loop); 3684 adds(len, len, 4); 3685 br(Assembler::GT, CRC_by1_loop); 3686 b(L_exit); 3687 3688 BIND(CRC_by32_loop); 3689 ldp(tmp0, tmp1, Address(post(buf, 16))); 3690 subs(len, len, 32); 3691 crc32x(crc, crc, tmp0); 3692 ldr(tmp2, Address(post(buf, 8))); 3693 crc32x(crc, crc, tmp1); 3694 ldr(tmp3, Address(post(buf, 8))); 3695 crc32x(crc, crc, tmp2); 3696 crc32x(crc, crc, tmp3); 3697 br(Assembler::GE, CRC_by32_loop); 3698 cmn(len, (u1)32); 3699 br(Assembler::NE, CRC_less32); 3700 b(L_exit); 3701 3702 BIND(CRC_by4_loop); 3703 ldrw(tmp0, Address(post(buf, 4))); 3704 subs(len, len, 4); 3705 crc32w(crc, crc, tmp0); 3706 br(Assembler::GE, CRC_by4_loop); 3707 adds(len, len, 4); 3708 br(Assembler::LE, L_exit); 3709 BIND(CRC_by1_loop); 3710 ldrb(tmp0, Address(post(buf, 1))); 3711 subs(len, len, 1); 3712 crc32b(crc, crc, tmp0); 3713 br(Assembler::GT, CRC_by1_loop); 3714 b(L_exit); 3715 3716 BIND(CRC_by64_pre); 3717 sub(buf, buf, 8); 3718 ldp(tmp0, tmp1, Address(buf, 8)); 3719 crc32x(crc, crc, tmp0); 3720 ldr(tmp2, Address(buf, 24)); 3721 crc32x(crc, crc, tmp1); 3722 ldr(tmp3, Address(buf, 32)); 3723 crc32x(crc, crc, tmp2); 3724 ldr(tmp0, Address(buf, 40)); 3725 crc32x(crc, crc, tmp3); 3726 ldr(tmp1, Address(buf, 48)); 3727 crc32x(crc, crc, tmp0); 3728 ldr(tmp2, Address(buf, 56)); 3729 crc32x(crc, crc, tmp1); 3730 ldr(tmp3, Address(pre(buf, 64))); 3731 3732 b(CRC_by64_loop); 3733 3734 align(CodeEntryAlignment); 3735 BIND(CRC_by64_loop); 3736 subs(len, len, 64); 3737 crc32x(crc, crc, tmp2); 3738 ldr(tmp0, Address(buf, 8)); 3739 crc32x(crc, crc, tmp3); 3740 ldr(tmp1, Address(buf, 16)); 3741 crc32x(crc, crc, tmp0); 3742 ldr(tmp2, Address(buf, 24)); 3743 crc32x(crc, crc, tmp1); 3744 ldr(tmp3, Address(buf, 32)); 3745 crc32x(crc, crc, tmp2); 3746 ldr(tmp0, Address(buf, 40)); 3747 crc32x(crc, crc, tmp3); 3748 ldr(tmp1, Address(buf, 48)); 3749 crc32x(crc, crc, tmp0); 3750 ldr(tmp2, Address(buf, 56)); 3751 crc32x(crc, crc, tmp1); 3752 ldr(tmp3, Address(pre(buf, 64))); 3753 br(Assembler::GE, CRC_by64_loop); 3754 3755 // post-loop 3756 crc32x(crc, crc, tmp2); 3757 crc32x(crc, crc, tmp3); 3758 3759 sub(len, len, 64); 3760 add(buf, buf, 8); 3761 cmn(len, (u1)128); 3762 br(Assembler::NE, CRC_less64); 3763 BIND(L_exit); 3764 mvnw(crc, crc); 3765 } 3766 3767 /** 3768 * @param crc register containing existing CRC (32-bit) 3769 * @param buf register pointing to input byte buffer (byte*) 3770 * @param len register containing number of bytes 3771 * @param table register that will contain address of CRC table 3772 * @param tmp scratch register 3773 */ 3774 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 3775 Register table0, Register table1, Register table2, Register table3, 3776 Register tmp, Register tmp2, Register tmp3) { 3777 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 3778 3779 if (UseCryptoPmullForCRC32) { 3780 
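// Dispatch: prefer the 128-byte PMULL folding kernel, then the scalar
// CRC32 instructions; otherwise fall through to the table-driven
// implementation below.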
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 3781 return; 3782 } 3783 3784 if (UseCRC32) { 3785 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 3786 return; 3787 } 3788 3789 mvnw(crc, crc); 3790 3791 { 3792 uint64_t offset; 3793 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 3794 add(table0, table0, offset); 3795 } 3796 add(table1, table0, 1*256*sizeof(juint)); 3797 add(table2, table0, 2*256*sizeof(juint)); 3798 add(table3, table0, 3*256*sizeof(juint)); 3799 3800 if (UseNeon) { 3801 cmp(len, (u1)64); 3802 br(Assembler::LT, L_by16); 3803 eor(v16, T16B, v16, v16); 3804 3805 Label L_fold; 3806 3807 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 3808 3809 ld1(v0, v1, T2D, post(buf, 32)); 3810 ld1r(v4, T2D, post(tmp, 8)); 3811 ld1r(v5, T2D, post(tmp, 8)); 3812 ld1r(v6, T2D, post(tmp, 8)); 3813 ld1r(v7, T2D, post(tmp, 8)); 3814 mov(v16, S, 0, crc); 3815 3816 eor(v0, T16B, v0, v16); 3817 sub(len, len, 64); 3818 3819 BIND(L_fold); 3820 pmull(v22, T8H, v0, v5, T8B); 3821 pmull(v20, T8H, v0, v7, T8B); 3822 pmull(v23, T8H, v0, v4, T8B); 3823 pmull(v21, T8H, v0, v6, T8B); 3824 3825 pmull2(v18, T8H, v0, v5, T16B); 3826 pmull2(v16, T8H, v0, v7, T16B); 3827 pmull2(v19, T8H, v0, v4, T16B); 3828 pmull2(v17, T8H, v0, v6, T16B); 3829 3830 uzp1(v24, T8H, v20, v22); 3831 uzp2(v25, T8H, v20, v22); 3832 eor(v20, T16B, v24, v25); 3833 3834 uzp1(v26, T8H, v16, v18); 3835 uzp2(v27, T8H, v16, v18); 3836 eor(v16, T16B, v26, v27); 3837 3838 ushll2(v22, T4S, v20, T8H, 8); 3839 ushll(v20, T4S, v20, T4H, 8); 3840 3841 ushll2(v18, T4S, v16, T8H, 8); 3842 ushll(v16, T4S, v16, T4H, 8); 3843 3844 eor(v22, T16B, v23, v22); 3845 eor(v18, T16B, v19, v18); 3846 eor(v20, T16B, v21, v20); 3847 eor(v16, T16B, v17, v16); 3848 3849 uzp1(v17, T2D, v16, v20); 3850 uzp2(v21, T2D, v16, v20); 3851 eor(v17, T16B, v17, v21); 3852 3853 ushll2(v20, T2D, v17, T4S, 16); 3854 ushll(v16, T2D, v17, T2S, 16); 3855 3856 eor(v20, T16B, v20, v22); 3857 eor(v16, T16B, v16, v18); 3858 3859 uzp1(v17, T2D, v20, v16); 3860 uzp2(v21, T2D, v20, v16); 3861 eor(v28, T16B, v17, v21); 3862 3863 pmull(v22, T8H, v1, v5, T8B); 3864 pmull(v20, T8H, v1, v7, T8B); 3865 pmull(v23, T8H, v1, v4, T8B); 3866 pmull(v21, T8H, v1, v6, T8B); 3867 3868 pmull2(v18, T8H, v1, v5, T16B); 3869 pmull2(v16, T8H, v1, v7, T16B); 3870 pmull2(v19, T8H, v1, v4, T16B); 3871 pmull2(v17, T8H, v1, v6, T16B); 3872 3873 ld1(v0, v1, T2D, post(buf, 32)); 3874 3875 uzp1(v24, T8H, v20, v22); 3876 uzp2(v25, T8H, v20, v22); 3877 eor(v20, T16B, v24, v25); 3878 3879 uzp1(v26, T8H, v16, v18); 3880 uzp2(v27, T8H, v16, v18); 3881 eor(v16, T16B, v26, v27); 3882 3883 ushll2(v22, T4S, v20, T8H, 8); 3884 ushll(v20, T4S, v20, T4H, 8); 3885 3886 ushll2(v18, T4S, v16, T8H, 8); 3887 ushll(v16, T4S, v16, T4H, 8); 3888 3889 eor(v22, T16B, v23, v22); 3890 eor(v18, T16B, v19, v18); 3891 eor(v20, T16B, v21, v20); 3892 eor(v16, T16B, v17, v16); 3893 3894 uzp1(v17, T2D, v16, v20); 3895 uzp2(v21, T2D, v16, v20); 3896 eor(v16, T16B, v17, v21); 3897 3898 ushll2(v20, T2D, v16, T4S, 16); 3899 ushll(v16, T2D, v16, T2S, 16); 3900 3901 eor(v20, T16B, v22, v20); 3902 eor(v16, T16B, v16, v18); 3903 3904 uzp1(v17, T2D, v20, v16); 3905 uzp2(v21, T2D, v20, v16); 3906 eor(v20, T16B, v17, v21); 3907 3908 shl(v16, T2D, v28, 1); 3909 shl(v17, T2D, v20, 1); 3910 3911 eor(v0, T16B, v0, v16); 3912 eor(v1, T16B, v1, v17); 3913 3914 subs(len, len, 32); 3915 br(Assembler::GE, L_fold); 3916 3917 mov(crc, 0); 3918 mov(tmp, v0, D, 0); 3919 
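// Fold the two 128-bit NEON accumulators into the scalar CRC: each 64-bit
// lane is moved to a core register and consumed 32 bits at a time by the
// table-driven word update (lower half first, then the upper half).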
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 3920 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 3921 mov(tmp, v0, D, 1); 3922 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 3923 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 3924 mov(tmp, v1, D, 0); 3925 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 3926 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 3927 mov(tmp, v1, D, 1); 3928 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 3929 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 3930 3931 add(len, len, 32); 3932 } 3933 3934 BIND(L_by16); 3935 subs(len, len, 16); 3936 br(Assembler::GE, L_by16_loop); 3937 adds(len, len, 16-4); 3938 br(Assembler::GE, L_by4_loop); 3939 adds(len, len, 4); 3940 br(Assembler::GT, L_by1_loop); 3941 b(L_exit); 3942 3943 BIND(L_by4_loop); 3944 ldrw(tmp, Address(post(buf, 4))); 3945 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 3946 subs(len, len, 4); 3947 br(Assembler::GE, L_by4_loop); 3948 adds(len, len, 4); 3949 br(Assembler::LE, L_exit); 3950 BIND(L_by1_loop); 3951 subs(len, len, 1); 3952 ldrb(tmp, Address(post(buf, 1))); 3953 update_byte_crc32(crc, tmp, table0); 3954 br(Assembler::GT, L_by1_loop); 3955 b(L_exit); 3956 3957 align(CodeEntryAlignment); 3958 BIND(L_by16_loop); 3959 subs(len, len, 16); 3960 ldp(tmp, tmp3, Address(post(buf, 16))); 3961 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 3962 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 3963 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 3964 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 3965 br(Assembler::GE, L_by16_loop); 3966 adds(len, len, 16-4); 3967 br(Assembler::GE, L_by4_loop); 3968 adds(len, len, 4); 3969 br(Assembler::GT, L_by1_loop); 3970 BIND(L_exit); 3971 mvnw(crc, crc); 3972 } 3973 3974 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 3975 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 3976 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 3977 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 3978 3979 subs(tmp0, len, 384); 3980 br(Assembler::GE, CRC_by128_pre); 3981 BIND(CRC_less128); 3982 subs(len, len, 32); 3983 br(Assembler::GE, CRC_by32_loop); 3984 BIND(CRC_less32); 3985 adds(len, len, 32 - 4); 3986 br(Assembler::GE, CRC_by4_loop); 3987 adds(len, len, 4); 3988 br(Assembler::GT, CRC_by1_loop); 3989 b(L_exit); 3990 3991 BIND(CRC_by32_loop); 3992 ldp(tmp0, tmp1, Address(buf)); 3993 crc32cx(crc, crc, tmp0); 3994 ldr(tmp2, Address(buf, 16)); 3995 crc32cx(crc, crc, tmp1); 3996 ldr(tmp3, Address(buf, 24)); 3997 crc32cx(crc, crc, tmp2); 3998 add(buf, buf, 32); 3999 subs(len, len, 32); 4000 crc32cx(crc, crc, tmp3); 4001 br(Assembler::GE, CRC_by32_loop); 4002 cmn(len, (u1)32); 4003 br(Assembler::NE, CRC_less32); 4004 b(L_exit); 4005 4006 BIND(CRC_by4_loop); 4007 ldrw(tmp0, Address(post(buf, 4))); 4008 subs(len, len, 4); 4009 crc32cw(crc, crc, tmp0); 4010 br(Assembler::GE, CRC_by4_loop); 4011 adds(len, len, 4); 4012 br(Assembler::LE, L_exit); 4013 BIND(CRC_by1_loop); 4014 ldrb(tmp0, Address(post(buf, 1))); 4015 subs(len, len, 1); 4016 crc32cb(crc, crc, tmp0); 4017 br(Assembler::GT, CRC_by1_loop); 4018 b(L_exit); 4019 4020 
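// Bulk CRC32C path: the shared PMULL folding routine below reduces the
// buffer to 128 bits in tmp0:tmp1; the extra 0x50 table offset selects the
// CRC-32C folding constants rather than the CRC-32 ones.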
BIND(CRC_by128_pre); 4021 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4022 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4023 mov(crc, 0); 4024 crc32cx(crc, crc, tmp0); 4025 crc32cx(crc, crc, tmp1); 4026 4027 cbnz(len, CRC_less128); 4028 4029 BIND(L_exit); 4030 } 4031 4032 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4033 Register len, Register tmp0, Register tmp1, Register tmp2, 4034 Register tmp3) { 4035 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4036 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4037 4038 subs(len, len, 128); 4039 br(Assembler::GE, CRC_by64_pre); 4040 BIND(CRC_less64); 4041 adds(len, len, 128-32); 4042 br(Assembler::GE, CRC_by32_loop); 4043 BIND(CRC_less32); 4044 adds(len, len, 32-4); 4045 br(Assembler::GE, CRC_by4_loop); 4046 adds(len, len, 4); 4047 br(Assembler::GT, CRC_by1_loop); 4048 b(L_exit); 4049 4050 BIND(CRC_by32_loop); 4051 ldp(tmp0, tmp1, Address(post(buf, 16))); 4052 subs(len, len, 32); 4053 crc32cx(crc, crc, tmp0); 4054 ldr(tmp2, Address(post(buf, 8))); 4055 crc32cx(crc, crc, tmp1); 4056 ldr(tmp3, Address(post(buf, 8))); 4057 crc32cx(crc, crc, tmp2); 4058 crc32cx(crc, crc, tmp3); 4059 br(Assembler::GE, CRC_by32_loop); 4060 cmn(len, (u1)32); 4061 br(Assembler::NE, CRC_less32); 4062 b(L_exit); 4063 4064 BIND(CRC_by4_loop); 4065 ldrw(tmp0, Address(post(buf, 4))); 4066 subs(len, len, 4); 4067 crc32cw(crc, crc, tmp0); 4068 br(Assembler::GE, CRC_by4_loop); 4069 adds(len, len, 4); 4070 br(Assembler::LE, L_exit); 4071 BIND(CRC_by1_loop); 4072 ldrb(tmp0, Address(post(buf, 1))); 4073 subs(len, len, 1); 4074 crc32cb(crc, crc, tmp0); 4075 br(Assembler::GT, CRC_by1_loop); 4076 b(L_exit); 4077 4078 BIND(CRC_by64_pre); 4079 sub(buf, buf, 8); 4080 ldp(tmp0, tmp1, Address(buf, 8)); 4081 crc32cx(crc, crc, tmp0); 4082 ldr(tmp2, Address(buf, 24)); 4083 crc32cx(crc, crc, tmp1); 4084 ldr(tmp3, Address(buf, 32)); 4085 crc32cx(crc, crc, tmp2); 4086 ldr(tmp0, Address(buf, 40)); 4087 crc32cx(crc, crc, tmp3); 4088 ldr(tmp1, Address(buf, 48)); 4089 crc32cx(crc, crc, tmp0); 4090 ldr(tmp2, Address(buf, 56)); 4091 crc32cx(crc, crc, tmp1); 4092 ldr(tmp3, Address(pre(buf, 64))); 4093 4094 b(CRC_by64_loop); 4095 4096 align(CodeEntryAlignment); 4097 BIND(CRC_by64_loop); 4098 subs(len, len, 64); 4099 crc32cx(crc, crc, tmp2); 4100 ldr(tmp0, Address(buf, 8)); 4101 crc32cx(crc, crc, tmp3); 4102 ldr(tmp1, Address(buf, 16)); 4103 crc32cx(crc, crc, tmp0); 4104 ldr(tmp2, Address(buf, 24)); 4105 crc32cx(crc, crc, tmp1); 4106 ldr(tmp3, Address(buf, 32)); 4107 crc32cx(crc, crc, tmp2); 4108 ldr(tmp0, Address(buf, 40)); 4109 crc32cx(crc, crc, tmp3); 4110 ldr(tmp1, Address(buf, 48)); 4111 crc32cx(crc, crc, tmp0); 4112 ldr(tmp2, Address(buf, 56)); 4113 crc32cx(crc, crc, tmp1); 4114 ldr(tmp3, Address(pre(buf, 64))); 4115 br(Assembler::GE, CRC_by64_loop); 4116 4117 // post-loop 4118 crc32cx(crc, crc, tmp2); 4119 crc32cx(crc, crc, tmp3); 4120 4121 sub(len, len, 64); 4122 add(buf, buf, 8); 4123 cmn(len, (u1)128); 4124 br(Assembler::NE, CRC_less64); 4125 BIND(L_exit); 4126 } 4127 4128 /** 4129 * @param crc register containing existing CRC (32-bit) 4130 * @param buf register pointing to input byte buffer (byte*) 4131 * @param len register containing number of bytes 4132 * @param table register that will contain address of CRC table 4133 * @param tmp scratch register 4134 */ 4135 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 4136 
Register table0, Register table1, Register table2, Register table3, 4137 Register tmp, Register tmp2, Register tmp3) { 4138 if (UseCryptoPmullForCRC32) { 4139 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4140 } else { 4141 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4142 } 4143 } 4144 4145 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4146 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4147 Label CRC_by128_loop; 4148 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4149 4150 sub(len, len, 256); 4151 Register table = tmp0; 4152 { 4153 uint64_t offset; 4154 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4155 add(table, table, offset); 4156 } 4157 add(table, table, table_offset); 4158 4159 // Registers v0..v7 are used as data registers. 4160 // Registers v16..v31 are used as tmp registers. 4161 sub(buf, buf, 0x10); 4162 ldrq(v0, Address(buf, 0x10)); 4163 ldrq(v1, Address(buf, 0x20)); 4164 ldrq(v2, Address(buf, 0x30)); 4165 ldrq(v3, Address(buf, 0x40)); 4166 ldrq(v4, Address(buf, 0x50)); 4167 ldrq(v5, Address(buf, 0x60)); 4168 ldrq(v6, Address(buf, 0x70)); 4169 ldrq(v7, Address(pre(buf, 0x80))); 4170 4171 movi(v31, T4S, 0); 4172 mov(v31, S, 0, crc); 4173 eor(v0, T16B, v0, v31); 4174 4175 // Register v16 contains constants from the crc table. 4176 ldrq(v16, Address(table)); 4177 b(CRC_by128_loop); 4178 4179 align(OptoLoopAlignment); 4180 BIND(CRC_by128_loop); 4181 pmull (v17, T1Q, v0, v16, T1D); 4182 pmull2(v18, T1Q, v0, v16, T2D); 4183 ldrq(v0, Address(buf, 0x10)); 4184 eor3(v0, T16B, v17, v18, v0); 4185 4186 pmull (v19, T1Q, v1, v16, T1D); 4187 pmull2(v20, T1Q, v1, v16, T2D); 4188 ldrq(v1, Address(buf, 0x20)); 4189 eor3(v1, T16B, v19, v20, v1); 4190 4191 pmull (v21, T1Q, v2, v16, T1D); 4192 pmull2(v22, T1Q, v2, v16, T2D); 4193 ldrq(v2, Address(buf, 0x30)); 4194 eor3(v2, T16B, v21, v22, v2); 4195 4196 pmull (v23, T1Q, v3, v16, T1D); 4197 pmull2(v24, T1Q, v3, v16, T2D); 4198 ldrq(v3, Address(buf, 0x40)); 4199 eor3(v3, T16B, v23, v24, v3); 4200 4201 pmull (v25, T1Q, v4, v16, T1D); 4202 pmull2(v26, T1Q, v4, v16, T2D); 4203 ldrq(v4, Address(buf, 0x50)); 4204 eor3(v4, T16B, v25, v26, v4); 4205 4206 pmull (v27, T1Q, v5, v16, T1D); 4207 pmull2(v28, T1Q, v5, v16, T2D); 4208 ldrq(v5, Address(buf, 0x60)); 4209 eor3(v5, T16B, v27, v28, v5); 4210 4211 pmull (v29, T1Q, v6, v16, T1D); 4212 pmull2(v30, T1Q, v6, v16, T2D); 4213 ldrq(v6, Address(buf, 0x70)); 4214 eor3(v6, T16B, v29, v30, v6); 4215 4216 // Reuse registers v23, v24. 4217 // Using them won't block the first instruction of the next iteration. 4218 pmull (v23, T1Q, v7, v16, T1D); 4219 pmull2(v24, T1Q, v7, v16, T2D); 4220 ldrq(v7, Address(pre(buf, 0x80))); 4221 eor3(v7, T16B, v23, v24, v7); 4222 4223 subs(len, len, 0x80); 4224 br(Assembler::GE, CRC_by128_loop); 4225 4226 // fold into 512 bits 4227 // Use v31 for constants because v16 can be still in use. 
4228 ldrq(v31, Address(table, 0x10)); 4229 4230 pmull (v17, T1Q, v0, v31, T1D); 4231 pmull2(v18, T1Q, v0, v31, T2D); 4232 eor3(v0, T16B, v17, v18, v4); 4233 4234 pmull (v19, T1Q, v1, v31, T1D); 4235 pmull2(v20, T1Q, v1, v31, T2D); 4236 eor3(v1, T16B, v19, v20, v5); 4237 4238 pmull (v21, T1Q, v2, v31, T1D); 4239 pmull2(v22, T1Q, v2, v31, T2D); 4240 eor3(v2, T16B, v21, v22, v6); 4241 4242 pmull (v23, T1Q, v3, v31, T1D); 4243 pmull2(v24, T1Q, v3, v31, T2D); 4244 eor3(v3, T16B, v23, v24, v7); 4245 4246 // fold into 128 bits 4247 // Use v17 for constants because v31 can be still in use. 4248 ldrq(v17, Address(table, 0x20)); 4249 pmull (v25, T1Q, v0, v17, T1D); 4250 pmull2(v26, T1Q, v0, v17, T2D); 4251 eor3(v3, T16B, v3, v25, v26); 4252 4253 // Use v18 for constants because v17 can be still in use. 4254 ldrq(v18, Address(table, 0x30)); 4255 pmull (v27, T1Q, v1, v18, T1D); 4256 pmull2(v28, T1Q, v1, v18, T2D); 4257 eor3(v3, T16B, v3, v27, v28); 4258 4259 // Use v19 for constants because v18 can be still in use. 4260 ldrq(v19, Address(table, 0x40)); 4261 pmull (v29, T1Q, v2, v19, T1D); 4262 pmull2(v30, T1Q, v2, v19, T2D); 4263 eor3(v0, T16B, v3, v29, v30); 4264 4265 add(len, len, 0x80); 4266 add(buf, buf, 0x10); 4267 4268 mov(tmp0, v0, D, 0); 4269 mov(tmp1, v0, D, 1); 4270 } 4271 4272 SkipIfEqual::SkipIfEqual( 4273 MacroAssembler* masm, const bool* flag_addr, bool value) { 4274 _masm = masm; 4275 uint64_t offset; 4276 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4277 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4278 if (value) { 4279 _masm->cbnzw(rscratch1, _label); 4280 } else { 4281 _masm->cbzw(rscratch1, _label); 4282 } 4283 } 4284 4285 SkipIfEqual::~SkipIfEqual() { 4286 _masm->bind(_label); 4287 } 4288 4289 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4290 Address adr; 4291 switch(dst.getMode()) { 4292 case Address::base_plus_offset: 4293 // This is the expected mode, although we allow all the other 4294 // forms below. 
4295 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4296 break; 4297 default: 4298 lea(rscratch2, dst); 4299 adr = Address(rscratch2); 4300 break; 4301 } 4302 ldr(rscratch1, adr); 4303 add(rscratch1, rscratch1, src); 4304 str(rscratch1, adr); 4305 } 4306 4307 void MacroAssembler::cmpptr(Register src1, Address src2) { 4308 uint64_t offset; 4309 adrp(rscratch1, src2, offset); 4310 ldr(rscratch1, Address(rscratch1, offset)); 4311 cmp(src1, rscratch1); 4312 } 4313 4314 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4315 cmp(obj1, obj2); 4316 } 4317 4318 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4319 load_method_holder(rresult, rmethod); 4320 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4321 } 4322 4323 void MacroAssembler::load_method_holder(Register holder, Register method) { 4324 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4325 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4326 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4327 } 4328 4329 void MacroAssembler::load_klass(Register dst, Register src) { 4330 if (UseCompressedClassPointers) { 4331 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4332 decode_klass_not_null(dst); 4333 } else { 4334 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4335 } 4336 } 4337 4338 // ((OopHandle)result).resolve(); 4339 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4340 // OopHandle::resolve is an indirection. 4341 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4342 } 4343 4344 // ((WeakHandle)result).resolve(); 4345 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4346 assert_different_registers(result, tmp1, tmp2); 4347 Label resolved; 4348 4349 // A null weak handle resolves to null. 4350 cbz(result, resolved); 4351 4352 // Only 64 bit platforms support GCs that require a tmp register 4353 // WeakHandle::resolve is an indirection like jweak. 4354 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4355 result, Address(result), tmp1, tmp2); 4356 bind(resolved); 4357 } 4358 4359 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) { 4360 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 4361 ldr(dst, Address(rmethod, Method::const_offset())); 4362 ldr(dst, Address(dst, ConstMethod::constants_offset())); 4363 ldr(dst, Address(dst, ConstantPool::pool_holder_offset())); 4364 ldr(dst, Address(dst, mirror_offset)); 4365 resolve_oop_handle(dst, tmp1, tmp2); 4366 } 4367 4368 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { 4369 if (UseCompressedClassPointers) { 4370 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4371 if (CompressedKlassPointers::base() == nullptr) { 4372 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); 4373 return; 4374 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 4375 && CompressedKlassPointers::shift() == 0) { 4376 // Only the bottom 32 bits matter 4377 cmpw(trial_klass, tmp); 4378 return; 4379 } 4380 decode_klass_not_null(tmp); 4381 } else { 4382 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4383 } 4384 cmp(trial_klass, tmp); 4385 } 4386 4387 void MacroAssembler::store_klass(Register dst, Register src) { 4388 // FIXME: Should this be a store release? 
concurrent GCs assume the 4389 // klass length is valid if the klass field is not null. 4390 if (UseCompressedClassPointers) { 4391 encode_klass_not_null(src); 4392 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4393 } else { 4394 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4395 } 4396 } 4397 4398 void MacroAssembler::store_klass_gap(Register dst, Register src) { 4399 if (UseCompressedClassPointers) { 4400 // Store to klass gap in destination 4401 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 4402 } 4403 } 4404 4405 // Algorithm must match CompressedOops::encode. 4406 void MacroAssembler::encode_heap_oop(Register d, Register s) { 4407 #ifdef ASSERT 4408 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 4409 #endif 4410 verify_oop_msg(s, "broken oop in encode_heap_oop"); 4411 if (CompressedOops::base() == nullptr) { 4412 if (CompressedOops::shift() != 0) { 4413 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4414 lsr(d, s, LogMinObjAlignmentInBytes); 4415 } else { 4416 mov(d, s); 4417 } 4418 } else { 4419 subs(d, s, rheapbase); 4420 csel(d, d, zr, Assembler::HS); 4421 lsr(d, d, LogMinObjAlignmentInBytes); 4422 4423 /* Old algorithm: is this any worse? 4424 Label nonnull; 4425 cbnz(r, nonnull); 4426 sub(r, r, rheapbase); 4427 bind(nonnull); 4428 lsr(r, r, LogMinObjAlignmentInBytes); 4429 */ 4430 } 4431 } 4432 4433 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4434 #ifdef ASSERT 4435 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4436 if (CheckCompressedOops) { 4437 Label ok; 4438 cbnz(r, ok); 4439 stop("null oop passed to encode_heap_oop_not_null"); 4440 bind(ok); 4441 } 4442 #endif 4443 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 4444 if (CompressedOops::base() != nullptr) { 4445 sub(r, r, rheapbase); 4446 } 4447 if (CompressedOops::shift() != 0) { 4448 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4449 lsr(r, r, LogMinObjAlignmentInBytes); 4450 } 4451 } 4452 4453 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 4454 #ifdef ASSERT 4455 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 4456 if (CheckCompressedOops) { 4457 Label ok; 4458 cbnz(src, ok); 4459 stop("null oop passed to encode_heap_oop_not_null2"); 4460 bind(ok); 4461 } 4462 #endif 4463 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 4464 4465 Register data = src; 4466 if (CompressedOops::base() != nullptr) { 4467 sub(dst, src, rheapbase); 4468 data = dst; 4469 } 4470 if (CompressedOops::shift() != 0) { 4471 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4472 lsr(dst, data, LogMinObjAlignmentInBytes); 4473 data = dst; 4474 } 4475 if (data == src) 4476 mov(dst, src); 4477 } 4478 4479 void MacroAssembler::decode_heap_oop(Register d, Register s) { 4480 #ifdef ASSERT 4481 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 4482 #endif 4483 if (CompressedOops::base() == nullptr) { 4484 if (CompressedOops::shift() != 0 || d != s) { 4485 lsl(d, s, CompressedOops::shift()); 4486 } 4487 } else { 4488 Label done; 4489 if (d != s) 4490 mov(d, s); 4491 cbz(s, done); 4492 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 4493 bind(done); 4494 } 4495 verify_oop_msg(d, "broken oop in decode_heap_oop"); 4496 } 4497 4498 void MacroAssembler::decode_heap_oop_not_null(Register r) { 4499 assert
(UseCompressedOops, "should only be used for compressed headers"); 4500 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4501 // Cannot assert, unverified entry point counts instructions (see .ad file) 4502 // vtableStubs also counts instructions in pd_code_size_limit. 4503 // Also do not verify_oop as this is called by verify_oop. 4504 if (CompressedOops::shift() != 0) { 4505 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4506 if (CompressedOops::base() != nullptr) { 4507 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 4508 } else { 4509 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 4510 } 4511 } else { 4512 assert (CompressedOops::base() == nullptr, "sanity"); 4513 } 4514 } 4515 4516 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 4517 assert (UseCompressedOops, "should only be used for compressed headers"); 4518 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4519 // Cannot assert, unverified entry point counts instructions (see .ad file) 4520 // vtableStubs also counts instructions in pd_code_size_limit. 4521 // Also do not verify_oop as this is called by verify_oop. 4522 if (CompressedOops::shift() != 0) { 4523 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4524 if (CompressedOops::base() != nullptr) { 4525 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 4526 } else { 4527 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 4528 } 4529 } else { 4530 assert (CompressedOops::base() == nullptr, "sanity"); 4531 if (dst != src) { 4532 mov(dst, src); 4533 } 4534 } 4535 } 4536 4537 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 4538 4539 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 4540 assert(UseCompressedClassPointers, "not using compressed class pointers"); 4541 assert(Metaspace::initialized(), "metaspace not initialized yet"); 4542 4543 if (_klass_decode_mode != KlassDecodeNone) { 4544 return _klass_decode_mode; 4545 } 4546 4547 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 4548 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 4549 4550 if (CompressedKlassPointers::base() == nullptr) { 4551 return (_klass_decode_mode = KlassDecodeZero); 4552 } 4553 4554 if (operand_valid_for_logical_immediate( 4555 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 4556 const uint64_t range_mask = 4557 (1ULL << log2i(CompressedKlassPointers::range())) - 1; 4558 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 4559 return (_klass_decode_mode = KlassDecodeXor); 4560 } 4561 } 4562 4563 const uint64_t shifted_base = 4564 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 4565 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 4566 "compressed class base bad alignment"); 4567 4568 return (_klass_decode_mode = KlassDecodeMovk); 4569 } 4570 4571 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 4572 switch (klass_decode_mode()) { 4573 case KlassDecodeZero: 4574 if (CompressedKlassPointers::shift() != 0) { 4575 lsr(dst, src, LogKlassAlignmentInBytes); 4576 } else { 4577 if (dst != src) mov(dst, src); 4578 } 4579 break; 4580 4581 case KlassDecodeXor: 4582 if (CompressedKlassPointers::shift() != 0) { 4583 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4584 lsr(dst, dst, LogKlassAlignmentInBytes); 4585 } else { 4586 
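// Zero shift: a single XOR suffices because KlassDecodeXor is only chosen
// when the base has no bits inside the klass range (see klass_decode_mode),
// which makes XOR with the base equivalent to subtracting it.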
eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4587 } 4588 break; 4589 4590 case KlassDecodeMovk: 4591 if (CompressedKlassPointers::shift() != 0) { 4592 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 4593 } else { 4594 movw(dst, src); 4595 } 4596 break; 4597 4598 case KlassDecodeNone: 4599 ShouldNotReachHere(); 4600 break; 4601 } 4602 } 4603 4604 void MacroAssembler::encode_klass_not_null(Register r) { 4605 encode_klass_not_null(r, r); 4606 } 4607 4608 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 4609 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 4610 4611 switch (klass_decode_mode()) { 4612 case KlassDecodeZero: 4613 if (CompressedKlassPointers::shift() != 0) { 4614 lsl(dst, src, LogKlassAlignmentInBytes); 4615 } else { 4616 if (dst != src) mov(dst, src); 4617 } 4618 break; 4619 4620 case KlassDecodeXor: 4621 if (CompressedKlassPointers::shift() != 0) { 4622 lsl(dst, src, LogKlassAlignmentInBytes); 4623 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 4624 } else { 4625 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4626 } 4627 break; 4628 4629 case KlassDecodeMovk: { 4630 const uint64_t shifted_base = 4631 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 4632 4633 if (dst != src) movw(dst, src); 4634 movk(dst, shifted_base >> 32, 32); 4635 4636 if (CompressedKlassPointers::shift() != 0) { 4637 lsl(dst, dst, LogKlassAlignmentInBytes); 4638 } 4639 4640 break; 4641 } 4642 4643 case KlassDecodeNone: 4644 ShouldNotReachHere(); 4645 break; 4646 } 4647 } 4648 4649 void MacroAssembler::decode_klass_not_null(Register r) { 4650 decode_klass_not_null(r, r); 4651 } 4652 4653 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 4654 #ifdef ASSERT 4655 { 4656 ThreadInVMfromUnknown tiv; 4657 assert (UseCompressedOops, "should only be used for compressed oops"); 4658 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4659 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4660 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 4661 } 4662 #endif 4663 int oop_index = oop_recorder()->find_index(obj); 4664 InstructionMark im(this); 4665 RelocationHolder rspec = oop_Relocation::spec(oop_index); 4666 code_section()->relocate(inst_mark(), rspec); 4667 movz(dst, 0xDEAD, 16); 4668 movk(dst, 0xBEEF); 4669 } 4670 4671 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 4672 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 4673 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4674 int index = oop_recorder()->find_index(k); 4675 assert(! 
Universe::heap()->is_in(k), "should not be an oop"); 4676 4677 InstructionMark im(this); 4678 RelocationHolder rspec = metadata_Relocation::spec(index); 4679 code_section()->relocate(inst_mark(), rspec); 4680 narrowKlass nk = CompressedKlassPointers::encode(k); 4681 movz(dst, (nk >> 16), 16); 4682 movk(dst, nk & 0xffff); 4683 } 4684 4685 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 4686 Register dst, Address src, 4687 Register tmp1, Register tmp2) { 4688 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4689 decorators = AccessInternal::decorator_fixup(decorators, type); 4690 bool as_raw = (decorators & AS_RAW) != 0; 4691 if (as_raw) { 4692 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 4693 } else { 4694 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 4695 } 4696 } 4697 4698 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 4699 Address dst, Register val, 4700 Register tmp1, Register tmp2, Register tmp3) { 4701 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4702 decorators = AccessInternal::decorator_fixup(decorators, type); 4703 bool as_raw = (decorators & AS_RAW) != 0; 4704 if (as_raw) { 4705 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 4706 } else { 4707 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 4708 } 4709 } 4710 4711 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 4712 Register tmp2, DecoratorSet decorators) { 4713 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 4714 } 4715 4716 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 4717 Register tmp2, DecoratorSet decorators) { 4718 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 4719 } 4720 4721 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 4722 Register tmp2, Register tmp3, DecoratorSet decorators) { 4723 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 4724 } 4725 4726 // Used for storing nulls. 4727 void MacroAssembler::store_heap_oop_null(Address dst) { 4728 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 4729 } 4730 4731 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 4732 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 4733 int index = oop_recorder()->allocate_metadata_index(obj); 4734 RelocationHolder rspec = metadata_Relocation::spec(index); 4735 return Address((address)obj, rspec); 4736 } 4737 4738 // Move an oop into a register. 4739 void MacroAssembler::movoop(Register dst, jobject obj) { 4740 int oop_index; 4741 if (obj == nullptr) { 4742 oop_index = oop_recorder()->allocate_oop_index(obj); 4743 } else { 4744 #ifdef ASSERT 4745 { 4746 ThreadInVMfromUnknown tiv; 4747 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 4748 } 4749 #endif 4750 oop_index = oop_recorder()->find_index(obj); 4751 } 4752 RelocationHolder rspec = oop_Relocation::spec(oop_index); 4753 4754 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 4755 mov(dst, Address((address)obj, rspec)); 4756 } else { 4757 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 4758 ldr_constant(dst, Address(dummy, rspec)); 4759 } 4760 4761 } 4762 4763 // Move a metadata address into a register. 
4764 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 4765 int oop_index; 4766 if (obj == nullptr) { 4767 oop_index = oop_recorder()->allocate_metadata_index(obj); 4768 } else { 4769 oop_index = oop_recorder()->find_index(obj); 4770 } 4771 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 4772 mov(dst, Address((address)obj, rspec)); 4773 } 4774 4775 Address MacroAssembler::constant_oop_address(jobject obj) { 4776 #ifdef ASSERT 4777 { 4778 ThreadInVMfromUnknown tiv; 4779 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4780 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 4781 } 4782 #endif 4783 int oop_index = oop_recorder()->find_index(obj); 4784 return Address((address)obj, oop_Relocation::spec(oop_index)); 4785 } 4786 4787 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 4788 void MacroAssembler::tlab_allocate(Register obj, 4789 Register var_size_in_bytes, 4790 int con_size_in_bytes, 4791 Register t1, 4792 Register t2, 4793 Label& slow_case) { 4794 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4795 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 4796 } 4797 4798 void MacroAssembler::verify_tlab() { 4799 #ifdef ASSERT 4800 if (UseTLAB && VerifyOops) { 4801 Label next, ok; 4802 4803 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 4804 4805 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 4806 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 4807 cmp(rscratch2, rscratch1); 4808 br(Assembler::HS, next); 4809 STOP("assert(top >= start)"); 4810 should_not_reach_here(); 4811 4812 bind(next); 4813 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 4814 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 4815 cmp(rscratch2, rscratch1); 4816 br(Assembler::HS, ok); 4817 STOP("assert(top <= end)"); 4818 should_not_reach_here(); 4819 4820 bind(ok); 4821 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 4822 } 4823 #endif 4824 } 4825 4826 // Writes to successive stack pages until the given offset is reached, to 4827 // check for stack overflow + shadow pages. This clobbers tmp. 4828 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 4829 assert_different_registers(tmp, size, rscratch1); 4830 mov(tmp, sp); 4831 // Bang stack for total size given plus shadow page size. 4832 // Bang one page at a time because large size can bang beyond yellow and 4833 // red zones. 4834 Label loop; 4835 mov(rscratch1, (int)os::vm_page_size()); 4836 bind(loop); 4837 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 4838 subsw(size, size, rscratch1); 4839 str(size, Address(tmp)); 4840 br(Assembler::GT, loop); 4841 4842 // Bang down shadow pages too. 4843 // At this point, (tmp-0) is the last address touched, so don't 4844 // touch it again. (It was touched as (tmp-pagesize) but then tmp 4845 // was post-decremented.) Skip this address by starting at i=1, and 4846 // touch a few more pages below. N.B. It is important to touch all 4847 // the way down to and including i=StackShadowPages. 4848 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 4849 // this could be any sized move but this can be a debugging crumb 4850 // so the bigger the better.
4851 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 4852 str(size, Address(tmp)); 4853 } 4854 } 4855 4856 // Move the address of the polling page into dest. 4857 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 4858 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 4859 } 4860 4861 // Read the polling page. The address of the polling page must 4862 // already be in r. 4863 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 4864 address mark; 4865 { 4866 InstructionMark im(this); 4867 code_section()->relocate(inst_mark(), rtype); 4868 ldrw(zr, Address(r, 0)); 4869 mark = inst_mark(); 4870 } 4871 verify_cross_modify_fence_not_required(); 4872 return mark; 4873 } 4874 4875 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 4876 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 4877 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 4878 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 4879 uint64_t dest_page = (uint64_t)dest.target() >> 12; 4880 int64_t offset_low = dest_page - low_page; 4881 int64_t offset_high = dest_page - high_page; 4882 4883 assert(is_valid_AArch64_address(dest.target()), "bad address"); 4884 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 4885 4886 InstructionMark im(this); 4887 code_section()->relocate(inst_mark(), dest.rspec()); 4888 // 8143067: Ensure that the adrp can reach the dest from anywhere within 4889 // the code cache so that if it is relocated we know it will still reach 4890 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 4891 _adrp(reg1, dest.target()); 4892 } else { 4893 uint64_t target = (uint64_t)dest.target(); 4894 uint64_t adrp_target 4895 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 4896 4897 _adrp(reg1, (address)adrp_target); 4898 movk(reg1, target >> 32, 32); 4899 } 4900 byte_offset = (uint64_t)dest.target() & 0xfff; 4901 } 4902 4903 void MacroAssembler::load_byte_map_base(Register reg) { 4904 CardTable::CardValue* byte_map_base = 4905 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 4906 4907 // Strictly speaking the byte_map_base isn't an address at all, and it might 4908 // even be negative. It is thus materialised as a constant. 
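// A card-table post-barrier computes the card address as
// byte_map_base + (heap_addr >> card_shift), so biasing the base by the heap
// start lets the barrier skip the subtraction. A minimal sketch, assuming
// the usual 512-byte cards (card_shift == 9) and a dirty value of 0:
//
//   void dirty_card(uint8_t* byte_map_base, uintptr_t addr) {
//     byte_map_base[addr >> 9] = 0; // mark the card covering addr dirty
//   }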
4909 mov(reg, (uint64_t)byte_map_base); 4910 4911 4912 void MacroAssembler::build_frame(int framesize) { 4913 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 4914 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 4915 protect_return_address(); 4916 if (framesize < ((1 << 9) + 2 * wordSize)) { 4917 sub(sp, sp, framesize); 4918 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 4919 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 4920 } else { 4921 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 4922 if (PreserveFramePointer) mov(rfp, sp); 4923 if (framesize < ((1 << 12) + 2 * wordSize)) 4924 sub(sp, sp, framesize - 2 * wordSize); 4925 else { 4926 mov(rscratch1, framesize - 2 * wordSize); 4927 sub(sp, sp, rscratch1); 4928 } 4929 } 4930 verify_cross_modify_fence_not_required(); 4931 } 4932 4933 void MacroAssembler::remove_frame(int framesize) { 4934 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 4935 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 4936 if (framesize < ((1 << 9) + 2 * wordSize)) { 4937 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 4938 add(sp, sp, framesize); 4939 } else { 4940 if (framesize < ((1 << 12) + 2 * wordSize)) 4941 add(sp, sp, framesize - 2 * wordSize); 4942 else { 4943 mov(rscratch1, framesize - 2 * wordSize); 4944 add(sp, sp, rscratch1); 4945 } 4946 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 4947 } 4948 authenticate_return_address(); 4949 } 4950 4951 4952 // This method counts leading positive bytes (highest bit not set) in the provided byte array 4953 address MacroAssembler::count_positives(Register ary1, Register len, Register result) { 4954 // The simple and most common case (a small aligned array that is not at 4955 // the end of a memory page) is handled here. All other cases are in the stub. 4956 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE; 4957 const uint64_t UPPER_BIT_MASK=0x8080808080808080; 4958 assert_different_registers(ary1, len, result); 4959 4960 mov(result, len); 4961 cmpw(len, 0); 4962 br(LE, DONE); 4963 cmpw(len, 4 * wordSize); 4964 br(GE, STUB_LONG); // size > 32: go to stub 4965 4966 int shift = 64 - exact_log2(os::vm_page_size()); 4967 lsl(rscratch1, ary1, shift); 4968 mov(rscratch2, (size_t)(4 * wordSize) << shift); 4969 adds(rscratch2, rscratch1, rscratch2); // At end of page?
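// The lsl above moved ary1's offset-within-page into the top bits, so this
// add sets the carry flag exactly when a 4-word read starting at ary1 might
// reach or cross the end of the page.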
4970 br(CS, STUB); // at the end of page then go to stub 4971 subs(len, len, wordSize); 4972 br(LT, END); 4973 4974 BIND(LOOP); 4975 ldr(rscratch1, Address(post(ary1, wordSize))); 4976 tst(rscratch1, UPPER_BIT_MASK); 4977 br(NE, SET_RESULT); 4978 subs(len, len, wordSize); 4979 br(GE, LOOP); 4980 cmpw(len, -wordSize); 4981 br(EQ, DONE); 4982 4983 BIND(END); 4984 ldr(rscratch1, Address(ary1)); 4985 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 4986 lslv(rscratch1, rscratch1, rscratch2); 4987 tst(rscratch1, UPPER_BIT_MASK); 4988 br(NE, SET_RESULT); 4989 b(DONE); 4990 4991 BIND(STUB); 4992 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 4993 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 4994 address tpc1 = trampoline_call(count_pos); 4995 if (tpc1 == nullptr) { 4996 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 4997 postcond(pc() == badAddress); 4998 return nullptr; 4999 } 5000 b(DONE); 5001 5002 BIND(STUB_LONG); 5003 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5004 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5005 address tpc2 = trampoline_call(count_pos_long); 5006 if (tpc2 == nullptr) { 5007 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5008 postcond(pc() == badAddress); 5009 return nullptr; 5010 } 5011 b(DONE); 5012 5013 BIND(SET_RESULT); 5014 5015 add(len, len, wordSize); 5016 sub(result, result, len); 5017 5018 BIND(DONE); 5019 postcond(pc() != badAddress); 5020 return pc(); 5021 } 5022 5023 // Clobbers: rscratch1, rscratch2, rflags 5024 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5025 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5026 Register tmp4, Register tmp5, Register result, 5027 Register cnt1, int elem_size) { 5028 Label DONE, SAME; 5029 Register tmp1 = rscratch1; 5030 Register tmp2 = rscratch2; 5031 Register cnt2 = tmp2; // cnt2 only used in array length compare 5032 int elem_per_word = wordSize/elem_size; 5033 int log_elem_size = exact_log2(elem_size); 5034 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5035 int base_offset 5036 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5037 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5038 5039 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5040 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5041 5042 #ifndef PRODUCT 5043 { 5044 const char kind = (elem_size == 2) ? 'U' : 'L'; 5045 char comment[64]; 5046 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5047 BLOCK_COMMENT(comment); 5048 } 5049 #endif 5050 5051 // if (a1 == a2) 5052 // return true; 5053 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5054 br(EQ, SAME); 5055 5056 if (UseSimpleArrayEquals) { 5057 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5058 // if (a1 == nullptr || a2 == nullptr) 5059 // return false; 5060 // a1 & a2 == 0 means (some-pointer is null) or 5061 // (very-rare-or-even-probably-impossible-pointer-values) 5062 // so, we can save one branch in most cases 5063 tst(a1, a2); 5064 mov(result, false); 5065 br(EQ, A_MIGHT_BE_NULL); 5066 // if (a1.length != a2.length) 5067 // return false; 5068 bind(A_IS_NOT_NULL); 5069 ldrw(cnt1, Address(a1, length_offset)); 5070 ldrw(cnt2, Address(a2, length_offset)); 5071 eorw(tmp5, cnt1, cnt2); 5072 cbnzw(tmp5, DONE); 5073 lea(a1, Address(a1, base_offset)); 5074 lea(a2, Address(a2, base_offset)); 5075 // Check for short strings, i.e. smaller than wordSize. 5076 subs(cnt1, cnt1, elem_per_word); 5077 br(Assembler::LT, SHORT); 5078 // Main 8 byte comparison loop. 5079 bind(NEXT_WORD); { 5080 ldr(tmp1, Address(post(a1, wordSize))); 5081 ldr(tmp2, Address(post(a2, wordSize))); 5082 subs(cnt1, cnt1, elem_per_word); 5083 eor(tmp5, tmp1, tmp2); 5084 cbnz(tmp5, DONE); 5085 } br(GT, NEXT_WORD); 5086 // Last longword. In the case where length == 4 we compare the 5087 // same longword twice, but that's still faster than another 5088 // conditional branch. 5089 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5090 // length == 4. 5091 if (log_elem_size > 0) 5092 lsl(cnt1, cnt1, log_elem_size); 5093 ldr(tmp3, Address(a1, cnt1)); 5094 ldr(tmp4, Address(a2, cnt1)); 5095 eor(tmp5, tmp3, tmp4); 5096 cbnz(tmp5, DONE); 5097 b(SAME); 5098 bind(A_MIGHT_BE_NULL); 5099 // in case both a1 and a2 are not-null, proceed with loads 5100 cbz(a1, DONE); 5101 cbz(a2, DONE); 5102 b(A_IS_NOT_NULL); 5103 bind(SHORT); 5104 5105 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5106 { 5107 ldrw(tmp1, Address(post(a1, 4))); 5108 ldrw(tmp2, Address(post(a2, 4))); 5109 eorw(tmp5, tmp1, tmp2); 5110 cbnzw(tmp5, DONE); 5111 } 5112 bind(TAIL03); 5113 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5114 { 5115 ldrh(tmp3, Address(post(a1, 2))); 5116 ldrh(tmp4, Address(post(a2, 2))); 5117 eorw(tmp5, tmp3, tmp4); 5118 cbnzw(tmp5, DONE); 5119 } 5120 bind(TAIL01); 5121 if (elem_size == 1) { // Only needed when comparing byte arrays. 5122 tbz(cnt1, 0, SAME); // 0-1 bytes left. 
      {
        ldrb(tmp1, a1);
        ldrb(tmp2, a2);
        eorw(tmp5, tmp1, tmp2);
        cbnzw(tmp5, DONE);
      }
    }
  } else {
    Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, EARLY_OUT,
        CSET_EQ, LAST_CHECK;
    mov(result, false);
    cbz(a1, DONE);
    ldrw(cnt1, Address(a1, length_offset));
    cbz(a2, EARLY_OUT);
    ldrw(cnt2, Address(a2, length_offset));
    // On most CPUs a2 is still "locked" (surprisingly) in ldrw, so it is
    // faster to perform another branch before comparing a1 and a2.
    cmp(cnt1, (u1)elem_per_word);
    br(LE, SHORT); // short or same
    ldr(tmp3, Address(pre(a1, base_offset)));
    subs(zr, cnt1, stubBytesThreshold);
    br(GE, STUB);
    ldr(tmp4, Address(pre(a2, base_offset)));
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
    cmp(cnt2, cnt1);
    br(NE, DONE);

    // Main 16 byte comparison loop with 2 exits
    bind(NEXT_DWORD); {
      ldr(tmp1, Address(pre(a1, wordSize)));
      ldr(tmp2, Address(pre(a2, wordSize)));
      subs(cnt1, cnt1, 2 * elem_per_word);
      br(LE, TAIL);
      eor(tmp4, tmp3, tmp4);
      cbnz(tmp4, DONE);
      ldr(tmp3, Address(pre(a1, wordSize)));
      ldr(tmp4, Address(pre(a2, wordSize)));
      cmp(cnt1, (u1)elem_per_word);
      br(LE, TAIL2);
      cmp(tmp1, tmp2);
    } br(EQ, NEXT_DWORD);
    b(DONE);

    bind(TAIL);
    eor(tmp4, tmp3, tmp4);
    eor(tmp2, tmp1, tmp2);
    lslv(tmp2, tmp2, tmp5);
    orr(tmp5, tmp4, tmp2);
    cmp(tmp5, zr);
    b(CSET_EQ);

    bind(TAIL2);
    eor(tmp2, tmp1, tmp2);
    cbnz(tmp2, DONE);
    b(LAST_CHECK);

    bind(STUB);
    ldr(tmp4, Address(pre(a2, base_offset)));
    cmp(cnt2, cnt1);
    br(NE, DONE);
    if (elem_size == 2) { // convert to byte counter
      lsl(cnt1, cnt1, 1);
    }
    eor(tmp5, tmp3, tmp4);
    cbnz(tmp5, DONE);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
    assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(EARLY_OUT, SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(DONE);

    bind(EARLY_OUT);
    // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
    // so, if a2 == null => return false(0), else return true, so we can return a2
    mov(result, a2);
    b(DONE);
    bind(SHORT);
    cmp(cnt2, cnt1);
    br(NE, DONE);
    cbz(cnt1, SAME);
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
    ldr(tmp3, Address(a1, base_offset));
    ldr(tmp4, Address(a2, base_offset));
    bind(LAST_CHECK);
    eor(tmp4, tmp3, tmp4);
    lslv(tmp5, tmp4, tmp5);
    cmp(tmp5, zr);
    bind(CSET_EQ);
    cset(result, EQ);
    b(DONE);
  }

  bind(SAME);
  mov(result, true);
  // That's it.
  bind(DONE);

  BLOCK_COMMENT("} array_equals");
  postcond(pc() != badAddress);
  return pc();
}

// Compare Strings

// For Strings we're passed the address of the first characters in a1
// and a2 and the length in cnt1.
// elem_size is the element size in bytes: either 1 or 2.
// There are two implementations.  For arrays >= 8 bytes, all
// comparisons (including the final one, which may overlap) are
// performed 8 bytes at a time.  For strings < 8 bytes, we compare a
// word, then a halfword, and then a byte.
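// Roughly, in C (an illustrative sketch, not an exact transcription of the
// generated code; 'len' is the byte count passed in cnt1 and word_at() is a
// hypothetical unaligned 8-byte load):
//
//   if (len >= 8) {
//     for (i = 0; i + 8 < len; i += 8)
//       if (word_at(a1 + i) != word_at(a2 + i)) return false;
//     // The final word may overlap the previous one; harmless for equality.
//     return word_at(a1 + len - 8) == word_at(a2 + len - 8);
//   }
//   // len < 8: compare 4, 2 and then 1 byte(s), as selected by the bits of len.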
void MacroAssembler::string_equals(Register a1, Register a2,
                                   Register result, Register cnt1, int elem_size)
{
  Label SAME, DONE, SHORT, NEXT_WORD;
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  Register cnt2 = tmp2;  // cnt2 only used in array length compare

  assert(elem_size == 1 || elem_size == 2, "must be 1 or 2 bytes");
  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);

#ifndef PRODUCT
  {
    const char kind = (elem_size == 2) ? 'U' : 'L';
    char comment[64];
    snprintf(comment, sizeof comment, "{string_equals%c", kind);
    BLOCK_COMMENT(comment);
  }
#endif

  mov(result, false);

  // Check for short strings, i.e. smaller than wordSize.
  subs(cnt1, cnt1, wordSize);
  br(Assembler::LT, SHORT);
  // Main 8 byte comparison loop.
  bind(NEXT_WORD); {
    ldr(tmp1, Address(post(a1, wordSize)));
    ldr(tmp2, Address(post(a2, wordSize)));
    subs(cnt1, cnt1, wordSize);
    eor(tmp1, tmp1, tmp2);
    cbnz(tmp1, DONE);
  } br(GT, NEXT_WORD);
  // Last longword.  In the case where length == 4 we compare the
  // same longword twice, but that's still faster than another
  // conditional branch.
  // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
  // length == 4.
  ldr(tmp1, Address(a1, cnt1));
  ldr(tmp2, Address(a2, cnt1));
  eor(tmp2, tmp1, tmp2);
  cbnz(tmp2, DONE);
  b(SAME);

  bind(SHORT);
  Label TAIL03, TAIL01;

  tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
  {
    ldrw(tmp1, Address(post(a1, 4)));
    ldrw(tmp2, Address(post(a2, 4)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL03);
  tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
  {
    ldrh(tmp1, Address(post(a1, 2)));
    ldrh(tmp2, Address(post(a2, 2)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL01);
  if (elem_size == 1) { // Only needed when comparing 1-byte elements
    tbz(cnt1, 0, SAME); // 0-1 bytes left.
    {
      ldrb(tmp1, a1);
      ldrb(tmp2, a2);
      eorw(tmp1, tmp1, tmp2);
      cbnzw(tmp1, DONE);
    }
  }
  // Arrays are equal.
  bind(SAME);
  mov(result, true);

  // That's it.
  bind(DONE);
  BLOCK_COMMENT("} string_equals");
}


// The size of the blocks erased by the zero_blocks stub.  We must
// handle anything smaller than this ourselves in zero_words().
const int MacroAssembler::zero_words_block_size = 8;

// zero_words() is used by C2 ClearArray patterns and by
// C1_MacroAssembler.  It is as small as possible, handling small word
// counts locally and delegating anything larger to the zero_blocks
// stub.  It is expanded many times in compiled code, so it is
// important to keep it short.

// ptr:   Address of a buffer to be zeroed.
// cnt:   Count in HeapWords.
//
// ptr, cnt, rscratch1, and rscratch2 are clobbered.
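// In outline (an illustrative sketch; the real code below drives the tail
// with tbz tests on the bits of cnt):
//
//   if (cnt >= zero_words_block_size)
//     zero_blocks(ptr, cnt);       // stub zeroes whole blocks, leaves cnt < 8
//   if (cnt & 4) { store 4 zero words }
//   if (cnt & 2) { store 2 zero words }
//   if (cnt & 1) { store 1 zero word  }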
address MacroAssembler::zero_words(Register ptr, Register cnt)
{
  assert(is_power_of_2(zero_words_block_size), "adjust this");

  BLOCK_COMMENT("zero_words {");
  assert(ptr == r10 && cnt == r11, "mismatch in register usage");

  subs(rscratch1, cnt, zero_words_block_size);
  Label around;
  br(LO, around);
  {
    RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
    assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
    // Make sure this is a C2 compilation. C1 allocates space only for
    // trampoline stubs generated by Call LIR ops, and in any case it
    // makes sense for a C1 compilation task to proceed as quickly as
    // possible.
    CompileTask* task;
    if (StubRoutines::aarch64::complete()
        && Thread::current()->is_Compiler_thread()
        && (task = ciEnv::current()->task())
        && is_c2_compile(task->comp_level())) {
      address tpc = trampoline_call(zero_blocks);
      if (tpc == nullptr) {
        DEBUG_ONLY(reset_labels(around));
        return nullptr;
      }
    } else {
      far_call(zero_blocks);
    }
  }
  bind(around);

  // We have a few words left to do. zero_blocks has adjusted r10 and r11
  // for us.
  for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
    Label l;
    tbz(cnt, exact_log2(i), l);
    for (int j = 0; j < i; j += 2) {
      stp(zr, zr, post(ptr, 2 * BytesPerWord));
    }
    bind(l);
  }
  {
    Label l;
    tbz(cnt, 0, l);
    str(zr, Address(ptr));
    bind(l);
  }

  BLOCK_COMMENT("} zero_words");
  return pc();
}

// base:  Address of a buffer to be zeroed, 8 bytes aligned.
// cnt:   Immediate count in HeapWords.
//
// r10, r11, rscratch1, and rscratch2 are clobbered.
address MacroAssembler::zero_words(Register base, uint64_t cnt)
{
  assert(wordSize <= BlockZeroingLowLimit,
         "increase BlockZeroingLowLimit");
  address result = nullptr;
  if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
#ifndef PRODUCT
    {
      char buf[64];
      snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
      BLOCK_COMMENT(buf);
    }
#endif
    if (cnt >= 16) {
      uint64_t loops = cnt/16;
      if (loops > 1) {
        mov(rscratch2, loops - 1);
      }
      {
        Label loop;
        bind(loop);
        for (int i = 0; i < 16; i += 2) {
          stp(zr, zr, Address(base, i * BytesPerWord));
        }
        add(base, base, 16 * BytesPerWord);
        if (loops > 1) {
          subs(rscratch2, rscratch2, 1);
          br(GE, loop);
        }
      }
    }
    cnt %= 16;
    int i = cnt & 1;  // store any odd word to start
    if (i) str(zr, Address(base));
    for (; i < (int)cnt; i += 2) {
      stp(zr, zr, Address(base, i * wordSize));
    }
    BLOCK_COMMENT("} zero_words");
    result = pc();
  } else {
    mov(r10, base); mov(r11, cnt);
    result = zero_words(r10, r11);
  }
  return result;
}

// Zero blocks of memory by using DC ZVA.
//
// Aligns the base address first sufficiently for DC ZVA, then uses
// DC ZVA repeatedly for every full block.  cnt is the size to be
// zeroed in HeapWords.  Returns the count of words left to be zeroed
// in cnt.
//
// NOTE: This is intended to be used in the zero_blocks() stub.  If
// you want to use it elsewhere, note that cnt must be >= 2*zva_length.
void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
  Register tmp = rscratch1;
  Register tmp2 = rscratch2;
  int zva_length = VM_Version::zva_length();
  Label initial_table_end, loop_zva;
  Label fini;

  // Base must be 16 byte aligned. If not, just return and let the
  // caller handle it.
  tst(base, 0x0f);
  br(Assembler::NE, fini);
  // Align base with ZVA length.
  neg(tmp, base);
  andr(tmp, tmp, zva_length - 1);

  // tmp: the number of bytes to be filled to align the base with ZVA length.
  add(base, base, tmp);
  sub(cnt, cnt, tmp, Assembler::ASR, 3);
  adr(tmp2, initial_table_end);
  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
  br(tmp2);

  for (int i = -zva_length + 16; i < 0; i += 16)
    stp(zr, zr, Address(base, i));
  bind(initial_table_end);

  sub(cnt, cnt, zva_length >> 3);
  bind(loop_zva);
  dc(Assembler::ZVA, base);
  subs(cnt, cnt, zva_length >> 3);
  add(base, base, zva_length);
  br(Assembler::GE, loop_zva);
  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
  bind(fini);
}

// base:   Address of a buffer to be filled, 8 bytes aligned.
// cnt:    Count in 8-byte unit.
// value:  Value to be filled with.
// base will point to the end of the buffer after filling.
void MacroAssembler::fill_words(Register base, Register cnt, Register value)
{
//  Algorithm:
//
//    if (cnt == 0) {
//      return;
//    }
//    if ((p & 8) != 0) {
//      *p++ = v;
//    }
//
//    scratch1 = cnt & 14;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1 / 2) {
//      do {
//        cnt -= 16;
//          p[-16] = v;
//          p[-15] = v;
//        case 7:
//          p[-14] = v;
//          p[-13] = v;
//        case 6:
//          p[-12] = v;
//          p[-11] = v;
//          // ...
//        case 1:
//          p[-2] = v;
//          p[-1] = v;
//        case 0:
//          p += 16;
//      } while (cnt);
//    }
//    if ((cnt & 1) == 1) {
//      *p++ = v;
//    }

  assert_different_registers(base, cnt, value, rscratch1, rscratch2);

  Label fini, skip, entry, loop;
  const int unroll = 8; // Number of stp instructions we'll unroll

  cbz(cnt, fini);
  tbz(base, 3, skip);
  str(value, Address(post(base, 8)));
  sub(cnt, cnt, 1);
  bind(skip);

  andr(rscratch1, cnt, (unroll-1) * 2);
  sub(cnt, cnt, rscratch1);
  add(base, base, rscratch1, Assembler::LSL, 3);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
  br(rscratch2);

  bind(loop);
  add(base, base, unroll * 16);
  for (int i = -unroll; i < 0; i++)
    stp(value, value, Address(base, i * 16));
  bind(entry);
  subs(cnt, cnt, unroll * 2);
  br(Assembler::GE, loop);

  tbz(cnt, 0, fini);
  str(value, Address(post(base, 8)));
  bind(fini);
}

// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
//     return the number of characters copied.
// - java/lang/StringUTF16.compress
//     return zero (0) if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register.
// A successful copy will complete with the
// post-condition: 'res' == 'len', while an unsuccessful copy will exit with
// the post-condition: 0 <= 'res' < 'len'.
//
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
//       degrade performance (on Ampere Altra - Neoverse N1) to an
//       unacceptable extent, even though the footprint would be smaller.
//       Using 'umaxv' in the ASCII-case comes with a small penalty but does
//       avoid additional bloat.
//
// Clobbers: src, dst, res, rscratch1, rscratch2, rflags
void MacroAssembler::encode_iso_array(Register src, Register dst,
                                      Register len, Register res, bool ascii,
                                      FloatRegister vtmp0, FloatRegister vtmp1,
                                      FloatRegister vtmp2, FloatRegister vtmp3,
                                      FloatRegister vtmp4, FloatRegister vtmp5)
{
  Register cnt = res;
  Register max = rscratch1;
  Register chk = rscratch2;

  prfm(Address(src), PLDL1STRM);
  movw(cnt, len);

#define ASCII(insn) do { if (ascii) { insn; } } while (0)

  Label LOOP_32, DONE_32, FAIL_32;

  BIND(LOOP_32);
  {
    cmpw(cnt, 32);
    br(LT, DONE_32);
    ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
    // Extract lower bytes.
    FloatRegister vlo0 = vtmp4;
    FloatRegister vlo1 = vtmp5;
    uzp1(vlo0, T16B, vtmp0, vtmp1);
    uzp1(vlo1, T16B, vtmp2, vtmp3);
    // Merge bits...
    orr(vtmp0, T16B, vtmp0, vtmp1);
    orr(vtmp2, T16B, vtmp2, vtmp3);
    // Extract merged upper bytes.
    FloatRegister vhix = vtmp0;
    uzp2(vhix, T16B, vtmp0, vtmp2);
    // ISO-check on hi-parts (all zero).
    //                          ASCII-check on lo-parts (no sign).
    FloatRegister vlox = vtmp1; // Merge lower bytes.
    ASCII(orr(vlox, T16B, vlo0, vlo1));
    umov(chk, vhix, D, 1);      ASCII(cm(LT, vlox, T16B, vlox));
    fmovd(max, vhix);           ASCII(umaxv(vlox, T16B, vlox));
    orr(chk, chk, max);         ASCII(umov(max, vlox, B, 0));
                                ASCII(orr(chk, chk, max));
    cbnz(chk, FAIL_32);
    subw(cnt, cnt, 32);
    st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
    b(LOOP_32);
  }
  BIND(FAIL_32);
  sub(src, src, 64);
  BIND(DONE_32);

  Label LOOP_8, SKIP_8;

  BIND(LOOP_8);
  {
    cmpw(cnt, 8);
    br(LT, SKIP_8);
    FloatRegister vhi = vtmp0;
    FloatRegister vlo = vtmp1;
    ld1(vtmp3, T8H, src);
    uzp1(vlo, T16B, vtmp3, vtmp3);
    uzp2(vhi, T16B, vtmp3, vtmp3);
    // ISO-check on hi-parts (all zero).
    //                          ASCII-check on lo-parts (no sign).
    ASCII(cm(LT, vtmp2, T16B, vlo));
    fmovd(chk, vhi);            ASCII(umaxv(vtmp2, T16B, vtmp2));
                                ASCII(umov(max, vtmp2, B, 0));
                                ASCII(orr(chk, chk, max));
    cbnz(chk, SKIP_8);

    strd(vlo, Address(post(dst, 8)));
    subw(cnt, cnt, 8);
    add(src, src, 16);
    b(LOOP_8);
  }
  BIND(SKIP_8);

#undef ASCII

  Label LOOP, DONE;

  cbz(cnt, DONE);
  BIND(LOOP);
  {
    Register chr = rscratch1;
    ldrh(chr, Address(post(src, 2)));
    tst(chr, ascii ? 0xff80 : 0xff00);
    br(NE, DONE);
    strb(chr, Address(post(dst, 1)));
    subs(cnt, cnt, 1);
    br(GT, LOOP);
  }
  BIND(DONE);
  // Return index where we stopped.
  subw(res, len, cnt);
}

// Inflate byte[] array to char[].
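// For example, the Latin-1 bytes {'J', 'V', 'M'} inflate to the UTF-16
// chars {0x004A, 0x0056, 0x004D}: each byte is zero-extended to 16 bits.
// The SIMD paths below achieve this by zipping the source bytes with a
// zeroed vector (vtmp1).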
// Clobbers: src, dst, len, rflags, rscratch1, v0-v6
address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
                                           FloatRegister vtmp1, FloatRegister vtmp2,
                                           FloatRegister vtmp3, Register tmp4) {
  Label big, done, after_init, to_stub;

  assert_different_registers(src, dst, len, tmp4, rscratch1);

  fmovd(vtmp1, 0.0);
  lsrw(tmp4, len, 3);
  bind(after_init);
  cbnzw(tmp4, big);
  // Short string: less than 8 bytes.
  {
    Label loop, tiny;

    cmpw(len, 4);
    br(LT, tiny);
    // Use SIMD to do 4 bytes.
    ldrs(vtmp2, post(src, 4));
    zip1(vtmp3, T8B, vtmp2, vtmp1);
    subw(len, len, 4);
    strd(vtmp3, post(dst, 8));

    cbzw(len, done);

    // Do the remaining bytes one at a time.
    bind(loop);
    ldrb(tmp4, post(src, 1));
    strh(tmp4, post(dst, 2));
    subw(len, len, 1);

    bind(tiny);
    cbnz(len, loop);

    b(done);
  }

  if (SoftwarePrefetchHintDistance >= 0) {
    bind(to_stub);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
    assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(big, done));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(after_init);
  }

  // Unpack the bytes 8 at a time.
  bind(big);
  {
    Label loop, around, loop_last, loop_start;

    if (SoftwarePrefetchHintDistance >= 0) {
      const int large_loop_threshold = (64 + 16)/8;
      ldrd(vtmp2, post(src, 8));
      andw(len, len, 7);
      cmp(tmp4, (u1)large_loop_threshold);
      br(GE, to_stub);
      b(loop_start);

      bind(loop);
      ldrd(vtmp2, post(src, 8));
      bind(loop_start);
      subs(tmp4, tmp4, 1);
      br(EQ, loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      ldrd(vtmp3, post(src, 8));
      st1(vtmp2, T8H, post(dst, 16));
      subs(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp3, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      br(NE, loop);
      b(around);
      bind(loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      st1(vtmp2, T8H, post(dst, 16));
      bind(around);
      cbz(len, done);
    } else {
      andw(len, len, 7);
      bind(loop);
      ldrd(vtmp2, post(src, 8));
      sub(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp2, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      cbnz(tmp4, loop);
    }
  }

  // Do the tail of up to 8 bytes.
  add(src, src, len);
  ldrd(vtmp3, Address(src, -8));
  add(dst, dst, len, ext::uxtw, 1);
  zip1(vtmp3, T16B, vtmp3, vtmp1);
  strq(vtmp3, Address(dst, -16));

  bind(done);
  postcond(pc() != badAddress);
  return pc();
}

// Compress char[] array to byte[].
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         Register res,
                                         FloatRegister tmp0, FloatRegister tmp1,
                                         FloatRegister tmp2, FloatRegister tmp3,
                                         FloatRegister tmp4, FloatRegister tmp5) {
  encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
  // Adjust result: res == len ? len : 0
  cmp(len, res);
  csel(res, res, zr, EQ);
}

// java.lang.Math.round(double a)
// Returns the closest long to the argument, with ties rounding to
// positive infinity.  This requires some fiddling for corner
// cases.  We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
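// For example (illustrative): fcvtas rounds -0.5 to -1 (ties away from
// zero), but Math.round(-0.5) must be 0, so the fix-up path below computes
// floor(-0.5 + 0.5) = 0 instead.  The naive (jlong)(a + 0.5) would also
// double-round: for a = 0x1.fffffffffffffp-2, the largest double below 0.5,
// a + 0.5 rounds up to 1.0 and would yield 1 where Math.round returns 0.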
void MacroAssembler::java_round_double(Register dst, FloatRegister src,
                                       FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_double: { ");
  fmovd(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
  fcvtasd(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p52
  eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
  mov(rscratch2, julong_cast(0x1.0p52));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && abs(src) < 0x1.0p52
    // src may have a fractional part, so add 0.5
    fmovd(ftmp, 0.5);
    faddd(ftmp, src, ftmp);
    // Convert double to jlong, use RoundTowardsNegative
    fcvtmsd(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_double");
}

void MacroAssembler::java_round_float(Register dst, FloatRegister src,
                                      FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_float: { ");
  fmovs(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
  fcvtassw(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p23
  eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
  mov(rscratch2, jint_cast(0x1.0p23f));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && |src| < 0x1.0p23
    // src may have a fractional part, so add 0.5
    fmovs(ftmp, 0.5f);
    fadds(ftmp, src, ftmp);
    // Convert float to jint, use RoundTowardsNegative
    fcvtmssw(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_float");
}

// get_thread() can be called anywhere inside generated code so we
// need to save whatever non-callee save context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is an ordinary C function.
//
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs =
    LINUX_ONLY(RegSet::range(r0, r1)  + lr - dst)
    NOT_LINUX (RegSet::range(r0, r17) + lr - dst);

  protect_return_address();
  push(saved_regs, sp);

  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
  blr(lr);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
  authenticate_return_address();
}

void MacroAssembler::cache_wb(Address line) {
  assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
  assert(line.index() == noreg, "index should be noreg");
  assert(line.offset() == 0, "offset should be 0");
  // would like to assert this
  // assert(line._ext.shift == 0, "shift should be zero");
  if (VM_Version::supports_dcpop()) {
    // writeback using clear virtual address to point of persistence
    dc(Assembler::CVAP, line.base());
  } else {
    // no need to generate anything as Unsafe.writebackMemory should
    // never invoke this stub
  }
}

void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier post sync
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}

void MacroAssembler::verify_sve_vector_length(Register tmp) {
  // Make sure that native code does not change SVE vector length.
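  // Below, sve_inc(tmp, B) adds the number of byte elements in an SVE
  // vector to tmp, so starting from zero it yields the live vector length
  // in bytes, which is then checked against the length seen at startup.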
  if (!UseSVE) return;
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}

void MacroAssembler::safepoint_isb() {
  isb();
#ifndef PRODUCT
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
#endif
}

#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
#endif

void MacroAssembler::spin_wait() {
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
      case SpinWait::NOP:
        nop();
        break;
      case SpinWait::ISB:
        isb();
        break;
      case SpinWait::YIELD:
        yield();
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

// Stack frame creation/removal

void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses the FP as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    // The standard convention for C code is to use paciasp, which uses SP as the modifier. This
    // works because in C code, FP and SP match on function entry. In the JDK, SP and FP may not
    // match, so instead explicitly use the FP.
    pacia(lr, rfp);
  }
}

// Sign the return value in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses the FP from the start of the function as the modifier - which is stored at the address of
// the current FP.
//
void MacroAssembler::protect_return_address(Register return_reg, Register temp_reg) {
  if (VM_Version::use_rop_protection()) {
    assert(PreserveFramePointer, "PreserveFramePointer must be set for ROP protection");
    check_return_address(return_reg);
    ldr(temp_reg, Address(rfp));
    pacia(return_reg, temp_reg);
  }
}

// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
//
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autia(return_reg, rfp);
    check_return_address(return_reg);
  }
}

// Authenticate the return value in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses the FP from the start of the function as the modifier - which is stored at the address of
// the current FP.
//
void MacroAssembler::authenticate_return_address(Register return_reg, Register temp_reg) {
  if (VM_Version::use_rop_protection()) {
    assert(PreserveFramePointer, "PreserveFramePointer must be set for ROP protection");
    ldr(temp_reg, Address(rfp));
    autia(return_reg, temp_reg);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
//
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

#ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used - ie when the program returns to the invalid LR. At this point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
#endif

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (AArch64 ABI), even though Java would only store 32 bits for a parameter.
// On 32-bit it would simply be 32 bits, so this routine does 32->32 on
// 32-bit and 32->64 on 64-bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is null; if it is, we need no handle.

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to be moved between a float register and an int
// register, or between the stack and a register.
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with ZF set.
//
//  - obj: the object to be locked
//  - hdr: the header, already loaded from obj, will be destroyed
//  - t1, t2: temporary registers, will be destroyed
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, hdr, t1, t2, rscratch1);

  // Check if we would have space on lock-stack for the object.
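  // t1 below holds a byte offset into the thread-local lock-stack area;
  // the GT branch takes the slow path when top > end_offset() - 1, i.e.
  // when pushing one more oop would run past the end of the area.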
  ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(t1, (unsigned)LockStack::end_offset() - 1);
  br(Assembler::GT, slow);

  // Load (obj->mark() | 1) into hdr
  orr(hdr, hdr, markWord::unlocked_value);
  // Clear lock-bits, into t2
  eor(t2, hdr, markWord::unlocked_value);
  // Try to swing header from unlocked to locked
  // Clobbers rscratch1 when UseLSE is false
  cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
          /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
  br(Assembler::NE, slow);

  // After successful lock, push object on lock-stack
  ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
  str(obj, Address(rthread, t1));
  addw(t1, t1, oopSize);
  strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with ZF set.
//
//  - obj: the object to be unlocked
//  - hdr: the (pre-loaded) header of the object
//  - t1, t2: temporary registers
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, hdr, t1, t2, rscratch1);

#ifdef ASSERT
  {
    // The following checks rely on the fact that LockStack is only ever modified by
    // its owning thread, even if the lock got inflated concurrently; removal of LockStack
    // entries after inflation will happen delayed in that case.

    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GT, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
  {
    // Check if the top of the lock-stack matches the unlocked object.
    Label tos_ok;
    subw(t1, t1, oopSize);
    ldr(t1, Address(rthread, t1));
    cmpoop(t1, obj);
    br(Assembler::EQ, tos_ok);
    STOP("Top of lock-stack does not match the unlocked object");
    bind(tos_ok);
  }
  {
    // Check that hdr is fast-locked.
    Label hdr_ok;
    tst(hdr, markWord::lock_mask_in_place);
    br(Assembler::EQ, hdr_ok);
    STOP("Header is not fast-locked");
    bind(hdr_ok);
  }
#endif

  // Load the new header (unlocked) into t1
  orr(t1, hdr, markWord::unlocked_value);

  // Try to swing header from locked to unlocked
  // Clobbers rscratch1 when UseLSE is false
  cmpxchg(obj, hdr, t1, Assembler::xword,
          /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
  br(Assembler::NE, slow);

  // After successful unlock, pop object from lock-stack
  ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(t1, t1, oopSize);
#ifdef ASSERT
  str(zr, Address(rthread, t1));
#endif
  strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}