/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#if INCLUDE_CDS
#include "code/SCCache.hpp"
#endif
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//    1a)  00___x Unconditional branch (immediate)
//    1b)  01___0 Compare & branch (immediate)
//    1c)  01___1 Test & branch (immediate)
//    1d)  10___0 Conditional branch (immediate)
//         other  Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//    2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//    2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//         strictly should be 64 bit non-FP/SIMD i.e.
//         0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//    3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//    3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//         strictly should be 64 bit movz #imm16<<0
//         110___10100 (i.e. requires insn[31:21] == 11010010100)
//
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
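// (For the LDR case the returned offset is the unsigned 12-bit immediate
// scaled by the access size taken from insn2[31:30], as decoded below.)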
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp, & resp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (SCCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
      - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  // NB! Callers may assume that, when temp2_reg is a valid register,
  // this code sets it to a nonzero value.
1549 1550 assert_different_registers(sub_klass, super_klass, temp_reg); 1551 if (temp2_reg != noreg) 1552 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1553 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1554 1555 Label L_fallthrough; 1556 int label_nulls = 0; 1557 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1558 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1559 assert(label_nulls <= 1, "at most one null in the batch"); 1560 1561 // a couple of useful fields in sub_klass: 1562 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1563 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1564 Address secondary_supers_addr(sub_klass, ss_offset); 1565 Address super_cache_addr( sub_klass, sc_offset); 1566 1567 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1568 1569 // Do a linear scan of the secondary super-klass chain. 1570 // This code is rarely used, so simplicity is a virtue here. 1571 // The repne_scan instruction uses fixed registers, which we must spill. 1572 // Don't worry too much about pre-existing connections with the input regs. 1573 1574 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1575 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1576 1577 RegSet pushed_registers; 1578 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1579 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1580 1581 if (super_klass != r0) { 1582 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1583 } 1584 1585 push(pushed_registers, sp); 1586 1587 // Get super_klass value into r0 (even if it was in r5 or r2). 1588 if (super_klass != r0) { 1589 mov(r0, super_klass); 1590 } 1591 1592 #ifndef PRODUCT 1593 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1594 #endif //PRODUCT 1595 1596 // We will consult the secondary-super array. 1597 ldr(r5, secondary_supers_addr); 1598 // Load the array length. 1599 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1600 // Skip to start of data. 1601 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1602 1603 cmp(sp, zr); // Clear Z flag; SP is never zero 1604 // Scan R2 words at [R5] for an occurrence of R0. 1605 // Set NZ/Z based on last compare. 1606 repne_scan(r5, r0, r2, rscratch1); 1607 1608 // Unspill the temp. registers: 1609 pop(pushed_registers, sp); 1610 1611 br(Assembler::NE, *L_failure); 1612 1613 // Success. Cache the super we found and proceed in triumph. 1614 str(super_klass, super_cache_addr); 1615 1616 if (L_success != &L_fallthrough) { 1617 b(*L_success); 1618 } 1619 1620 #undef IS_A_TEMP 1621 1622 bind(L_fallthrough); 1623 } 1624 1625 // Ensure that the inline code and the stub are using the same registers. 
1626 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1627 do { \ 1628 assert(r_super_klass == r0 && \ 1629 r_array_base == r1 && \ 1630 r_array_length == r2 && \ 1631 (r_array_index == r3 || r_array_index == noreg) && \ 1632 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1633 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1634 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1635 } while(0) 1636 1637 // Return true: we succeeded in generating this code 1638 bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, 1639 Register r_super_klass, 1640 Register temp1, 1641 Register temp2, 1642 Register temp3, 1643 FloatRegister vtemp, 1644 Register result, 1645 u1 super_klass_slot, 1646 bool stub_is_near) { 1647 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1648 1649 Label L_fallthrough; 1650 1651 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1652 1653 const Register 1654 r_array_base = temp1, // r1 1655 r_array_length = temp2, // r2 1656 r_array_index = temp3, // r3 1657 r_bitmap = rscratch2; 1658 1659 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1660 1661 u1 bit = super_klass_slot; 1662 1663 // Make sure that result is nonzero if the TBZ below misses. 1664 mov(result, 1); 1665 1666 // We're going to need the bitmap in a vector reg and in a core reg, 1667 // so load both now. 1668 ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset())); 1669 if (bit != 0) { 1670 ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset())); 1671 } 1672 // First check the bitmap to see if super_klass might be present. If 1673 // the bit is zero, we are certain that super_klass is not one of 1674 // the secondary supers. 1675 tbz(r_bitmap, bit, L_fallthrough); 1676 1677 // Get the first array index that can contain super_klass into r_array_index. 1678 if (bit != 0) { 1679 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1680 cnt(vtemp, T8B, vtemp); 1681 addv(vtemp, T8B, vtemp); 1682 fmovd(r_array_index, vtemp); 1683 } else { 1684 mov(r_array_index, (u1)1); 1685 } 1686 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1687 1688 // We will consult the secondary-super array. 1689 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1690 1691 // The value i in r_array_index is >= 1, so even though r_array_base 1692 // points to the length, we don't need to adjust it to point to the 1693 // data. 1694 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1695 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1696 1697 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1698 eor(result, result, r_super_klass); 1699 cbz(result, L_fallthrough); // Found a match 1700 1701 // Is there another entry to check? Consult the bitmap. 1702 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1703 1704 // Linear probe. 1705 if (bit != 0) { 1706 ror(r_bitmap, r_bitmap, bit); 1707 } 1708 1709 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1710 // The next slot to be inspected, by the stub we're about to call, 1711 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1712 // have been checked. 
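  // For illustration only (bitmap value assumed): if the bitmap were
  // ...0b11011 with super_klass_slot == 3, the popcount taken above over
  // bits 0..3 gives r_array_index == 3, so the slot just inspected was
  // secondary_supers[2] and the stub will continue at secondary_supers[3];
  // after the rotation, bits 0 and 1 of the bitmap correspond to the
  // original bits 3 and 4.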
1713 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()); 1714 if (stub_is_near) { 1715 bl(stub); 1716 } else { 1717 address call = trampoline_call(stub); 1718 if (call == nullptr) { 1719 return false; // trampoline allocation failed 1720 } 1721 } 1722 1723 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1724 1725 bind(L_fallthrough); 1726 1727 if (VerifySecondarySupers) { 1728 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1729 temp1, temp2, result); // r1, r2, r5 1730 } 1731 return true; 1732 } 1733 1734 // Called by code generated by check_klass_subtype_slow_path 1735 // above. This is called when there is a collision in the hashed 1736 // lookup in the secondary supers array. 1737 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 1738 Register r_array_base, 1739 Register r_array_index, 1740 Register r_bitmap, 1741 Register temp1, 1742 Register result) { 1743 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1); 1744 1745 const Register 1746 r_array_length = temp1, 1747 r_sub_klass = noreg; // unused 1748 1749 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1750 1751 Label L_fallthrough, L_huge; 1752 1753 // Load the array length. 1754 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1755 // And adjust the array base to point to the data. 1756 // NB! Effectively increments current slot index by 1. 1757 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 1758 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1759 1760 // The bitmap is full to bursting. 1761 // Implicit invariant: BITMAP_FULL implies (length > 0) 1762 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); 1763 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2)); 1764 br(GT, L_huge); 1765 1766 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 1767 // current slot (at secondary_supers[r_array_index]) has not yet 1768 // been inspected, and r_array_index may be out of bounds if we 1769 // wrapped around the end of the array. 1770 1771 { // This is conventional linear probing, but instead of terminating 1772 // when a null entry is found in the table, we maintain a bitmap 1773 // in which a 0 indicates missing entries. 1774 // The check above guarantees there are 0s in the bitmap, so the loop 1775 // eventually terminates. 1776 Label L_loop; 1777 bind(L_loop); 1778 1779 // Check for wraparound. 1780 cmp(r_array_index, r_array_length); 1781 csel(r_array_index, zr, r_array_index, GE); 1782 1783 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1784 eor(result, rscratch1, r_super_klass); 1785 cbz(result, L_fallthrough); 1786 1787 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero 1788 1789 ror(r_bitmap, r_bitmap, 1); 1790 add(r_array_index, r_array_index, 1); 1791 b(L_loop); 1792 } 1793 1794 { // Degenerate case: more than 64 secondary supers. 1795 // FIXME: We could do something smarter here, maybe a vectorized 1796 // comparison or a binary search, but is that worth any added 1797 // complexity? 1798 bind(L_huge); 1799 cmp(sp, zr); // Clear Z flag; SP is never zero 1800 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1); 1801 cset(result, NE); // result == 0 iff we got a match. 1802 } 1803 1804 bind(L_fallthrough); 1805 } 1806 1807 // Make sure that the hashed lookup and a linear scan agree. 
1808 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 1809 Register r_super_klass, 1810 Register temp1, 1811 Register temp2, 1812 Register result) { 1813 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 1814 1815 const Register 1816 r_array_base = temp1, 1817 r_array_length = temp2, 1818 r_array_index = noreg, // unused 1819 r_bitmap = noreg; // unused 1820 1821 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1822 1823 BLOCK_COMMENT("verify_secondary_supers_table {"); 1824 1825 // We will consult the secondary-super array. 1826 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1827 1828 // Load the array length. 1829 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1830 // And adjust the array base to point to the data. 1831 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1832 1833 cmp(sp, zr); // Clear Z flag; SP is never zero 1834 // Scan R2 words at [R5] for an occurrence of R0. 1835 // Set NZ/Z based on last compare. 1836 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 1837 // rscratch1 == 0 iff we got a match. 1838 cset(rscratch1, NE); 1839 1840 Label passed; 1841 cmp(result, zr); 1842 cset(result, NE); // normalize result to 0/1 for comparison 1843 1844 cmp(rscratch1, result); 1845 br(EQ, passed); 1846 { 1847 mov(r0, r_super_klass); // r0 <- r0 1848 mov(r1, r_sub_klass); // r1 <- r4 1849 mov(r2, /*expected*/rscratch1); // r2 <- r8 1850 mov(r3, result); // r3 <- r5 1851 mov(r4, (address)("mismatch")); // r4 <- const 1852 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 1853 should_not_reach_here(); 1854 } 1855 bind(passed); 1856 1857 BLOCK_COMMENT("} verify_secondary_supers_table"); 1858 } 1859 1860 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1861 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1862 assert_different_registers(klass, rthread, scratch); 1863 1864 Label L_fallthrough, L_tmp; 1865 if (L_fast_path == nullptr) { 1866 L_fast_path = &L_fallthrough; 1867 } else if (L_slow_path == nullptr) { 1868 L_slow_path = &L_fallthrough; 1869 } 1870 // Fast path check: class is fully initialized 1871 lea(scratch, Address(klass, InstanceKlass::init_state_offset())); 1872 ldarb(scratch, scratch); 1873 subs(zr, scratch, InstanceKlass::fully_initialized); 1874 br(Assembler::EQ, *L_fast_path); 1875 1876 // Fast path check: current thread is initializer thread 1877 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1878 cmp(rthread, scratch); 1879 1880 if (L_slow_path == &L_fallthrough) { 1881 br(Assembler::EQ, *L_fast_path); 1882 bind(*L_slow_path); 1883 } else if (L_fast_path == &L_fallthrough) { 1884 br(Assembler::NE, *L_slow_path); 1885 bind(*L_fast_path); 1886 } else { 1887 Unimplemented(); 1888 } 1889 } 1890 1891 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1892 if (!VerifyOops) return; 1893 1894 // Pass register number to verify_oop_subroutine 1895 const char* b = nullptr; 1896 { 1897 ResourceMark rm; 1898 stringStream ss; 1899 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1900 b = code_string(ss.as_string()); 1901 } 1902 BLOCK_COMMENT("verify_oop {"); 1903 1904 strip_return_address(); // This might happen within a stack frame. 
1905 protect_return_address(); 1906 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1907 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1908 1909 mov(r0, reg); 1910 movptr(rscratch1, (uintptr_t)(address)b); 1911 1912 // call indirectly to solve generation ordering problem 1913 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1914 ldr(rscratch2, Address(rscratch2)); 1915 blr(rscratch2); 1916 1917 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1918 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1919 authenticate_return_address(); 1920 1921 BLOCK_COMMENT("} verify_oop"); 1922 } 1923 1924 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1925 if (!VerifyOops) return; 1926 1927 const char* b = nullptr; 1928 { 1929 ResourceMark rm; 1930 stringStream ss; 1931 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1932 b = code_string(ss.as_string()); 1933 } 1934 BLOCK_COMMENT("verify_oop_addr {"); 1935 1936 strip_return_address(); // This might happen within a stack frame. 1937 protect_return_address(); 1938 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1939 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1940 1941 // addr may contain sp so we will have to adjust it based on the 1942 // pushes that we just did. 1943 if (addr.uses(sp)) { 1944 lea(r0, addr); 1945 ldr(r0, Address(r0, 4 * wordSize)); 1946 } else { 1947 ldr(r0, addr); 1948 } 1949 movptr(rscratch1, (uintptr_t)(address)b); 1950 1951 // call indirectly to solve generation ordering problem 1952 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1953 ldr(rscratch2, Address(rscratch2)); 1954 blr(rscratch2); 1955 1956 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1957 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1958 authenticate_return_address(); 1959 1960 BLOCK_COMMENT("} verify_oop_addr"); 1961 } 1962 1963 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1964 int extra_slot_offset) { 1965 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
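  // Illustrative example (values assumed): with a stack element size of 8
  // bytes, a constant arg_slot of 2 and extra_slot_offset of 1, the address
  // returned below is esp + 2 * 8 + Interpreter::expr_offset_in_bytes(1).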
1966 int stackElementSize = Interpreter::stackElementSize; 1967 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1968 #ifdef ASSERT 1969 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1970 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1971 #endif 1972 if (arg_slot.is_constant()) { 1973 return Address(esp, arg_slot.as_constant() * stackElementSize 1974 + offset); 1975 } else { 1976 add(rscratch1, esp, arg_slot.as_register(), 1977 ext::uxtx, exact_log2(stackElementSize)); 1978 return Address(rscratch1, offset); 1979 } 1980 } 1981 1982 void MacroAssembler::call_VM_leaf_base(address entry_point, 1983 int number_of_arguments, 1984 Label *retaddr) { 1985 Label E, L; 1986 1987 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1988 1989 mov(rscratch1, entry_point); 1990 blr(rscratch1); 1991 if (retaddr) 1992 bind(*retaddr); 1993 1994 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1995 } 1996 1997 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1998 call_VM_leaf_base(entry_point, number_of_arguments); 1999 } 2000 2001 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2002 pass_arg0(this, arg_0); 2003 call_VM_leaf_base(entry_point, 1); 2004 } 2005 2006 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2007 assert_different_registers(arg_1, c_rarg0); 2008 pass_arg0(this, arg_0); 2009 pass_arg1(this, arg_1); 2010 call_VM_leaf_base(entry_point, 2); 2011 } 2012 2013 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2014 Register arg_1, Register arg_2) { 2015 assert_different_registers(arg_1, c_rarg0); 2016 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2017 pass_arg0(this, arg_0); 2018 pass_arg1(this, arg_1); 2019 pass_arg2(this, arg_2); 2020 call_VM_leaf_base(entry_point, 3); 2021 } 2022 2023 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2024 pass_arg0(this, arg_0); 2025 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2026 } 2027 2028 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2029 2030 assert_different_registers(arg_0, c_rarg1); 2031 pass_arg1(this, arg_1); 2032 pass_arg0(this, arg_0); 2033 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2034 } 2035 2036 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2037 assert_different_registers(arg_0, c_rarg1, c_rarg2); 2038 assert_different_registers(arg_1, c_rarg2); 2039 pass_arg2(this, arg_2); 2040 pass_arg1(this, arg_1); 2041 pass_arg0(this, arg_0); 2042 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2043 } 2044 2045 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2046 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2047 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2048 assert_different_registers(arg_2, c_rarg3); 2049 pass_arg3(this, arg_3); 2050 pass_arg2(this, arg_2); 2051 pass_arg1(this, arg_1); 2052 pass_arg0(this, arg_0); 2053 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2054 } 2055 2056 void MacroAssembler::null_check(Register reg, int offset) { 2057 if (needs_explicit_null_check(offset)) { 2058 // provoke OS null exception if reg is null by 2059 // accessing M[reg] w/o changing any registers 2060 // NOTE: this is plenty to provoke a segv 2061 ldr(zr, Address(reg)); 2062 } else { 2063 // 
nothing to do, (later) access of M[reg + offset] 2064 // will provoke OS null exception if reg is null 2065 } 2066 } 2067 2068 // MacroAssembler protected routines needed to implement 2069 // public methods 2070 2071 void MacroAssembler::mov(Register r, Address dest) { 2072 code_section()->relocate(pc(), dest.rspec()); 2073 uint64_t imm64 = (uint64_t)dest.target(); 2074 movptr(r, imm64); 2075 } 2076 2077 // Move a constant pointer into r. In AArch64 mode the virtual 2078 // address space is 48 bits in size, so we only need three 2079 // instructions to create a patchable instruction sequence that can 2080 // reach anywhere. 2081 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2082 #ifndef PRODUCT 2083 { 2084 char buffer[64]; 2085 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2086 block_comment(buffer); 2087 } 2088 #endif 2089 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2090 movz(r, imm64 & 0xffff); 2091 imm64 >>= 16; 2092 movk(r, imm64 & 0xffff, 16); 2093 imm64 >>= 16; 2094 movk(r, imm64 & 0xffff, 32); 2095 } 2096 2097 // Macro to mov replicated immediate to vector register. 2098 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2099 // the upper 56/48/32 bits must be zeros for B/H/S type. 2100 // Vd will get the following values for different arrangements in T 2101 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2102 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2103 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2104 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2105 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2106 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2107 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2108 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2109 // Clobbers rscratch1 2110 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2111 assert(T != T1Q, "unsupported"); 2112 if (T == T1D || T == T2D) { 2113 int imm = operand_valid_for_movi_immediate(imm64, T); 2114 if (-1 != imm) { 2115 movi(Vd, T, imm); 2116 } else { 2117 mov(rscratch1, imm64); 2118 dup(Vd, T, rscratch1); 2119 } 2120 return; 2121 } 2122 2123 #ifdef ASSERT 2124 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2125 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2126 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2127 #endif 2128 int shift = operand_valid_for_movi_immediate(imm64, T); 2129 uint32_t imm32 = imm64 & 0xffffffffULL; 2130 if (shift >= 0) { 2131 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2132 } else { 2133 movw(rscratch1, imm32); 2134 dup(Vd, T, rscratch1); 2135 } 2136 } 2137 2138 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2139 { 2140 #ifndef PRODUCT 2141 { 2142 char buffer[64]; 2143 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2144 block_comment(buffer); 2145 } 2146 #endif 2147 if (operand_valid_for_logical_immediate(false, imm64)) { 2148 orr(dst, zr, imm64); 2149 } else { 2150 // we can use a combination of MOVZ or MOVN with 2151 // MOVK to build up the constant 2152 uint64_t imm_h[4]; 2153 int zero_count = 0; 2154 int neg_count = 0; 2155 int i; 2156 for (i = 0; i < 4; i++) { 2157 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2158 if (imm_h[i] == 0) { 2159 zero_count++; 2160 } else if (imm_h[i] == 0xffffL) { 2161 neg_count++; 
2162 } 2163 } 2164 if (zero_count == 4) { 2165 // one MOVZ will do 2166 movz(dst, 0); 2167 } else if (neg_count == 4) { 2168 // one MOVN will do 2169 movn(dst, 0); 2170 } else if (zero_count == 3) { 2171 for (i = 0; i < 4; i++) { 2172 if (imm_h[i] != 0L) { 2173 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2174 break; 2175 } 2176 } 2177 } else if (neg_count == 3) { 2178 // one MOVN will do 2179 for (int i = 0; i < 4; i++) { 2180 if (imm_h[i] != 0xffffL) { 2181 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2182 break; 2183 } 2184 } 2185 } else if (zero_count == 2) { 2186 // one MOVZ and one MOVK will do 2187 for (i = 0; i < 3; i++) { 2188 if (imm_h[i] != 0L) { 2189 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2190 i++; 2191 break; 2192 } 2193 } 2194 for (;i < 4; i++) { 2195 if (imm_h[i] != 0L) { 2196 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2197 } 2198 } 2199 } else if (neg_count == 2) { 2200 // one MOVN and one MOVK will do 2201 for (i = 0; i < 4; i++) { 2202 if (imm_h[i] != 0xffffL) { 2203 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2204 i++; 2205 break; 2206 } 2207 } 2208 for (;i < 4; i++) { 2209 if (imm_h[i] != 0xffffL) { 2210 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2211 } 2212 } 2213 } else if (zero_count == 1) { 2214 // one MOVZ and two MOVKs will do 2215 for (i = 0; i < 4; i++) { 2216 if (imm_h[i] != 0L) { 2217 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2218 i++; 2219 break; 2220 } 2221 } 2222 for (;i < 4; i++) { 2223 if (imm_h[i] != 0x0L) { 2224 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2225 } 2226 } 2227 } else if (neg_count == 1) { 2228 // one MOVN and two MOVKs will do 2229 for (i = 0; i < 4; i++) { 2230 if (imm_h[i] != 0xffffL) { 2231 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2232 i++; 2233 break; 2234 } 2235 } 2236 for (;i < 4; i++) { 2237 if (imm_h[i] != 0xffffL) { 2238 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2239 } 2240 } 2241 } else { 2242 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2243 movz(dst, (uint32_t)imm_h[0], 0); 2244 for (i = 1; i < 4; i++) { 2245 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2246 } 2247 } 2248 } 2249 } 2250 2251 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2252 { 2253 #ifndef PRODUCT 2254 { 2255 char buffer[64]; 2256 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2257 block_comment(buffer); 2258 } 2259 #endif 2260 if (operand_valid_for_logical_immediate(true, imm32)) { 2261 orrw(dst, zr, imm32); 2262 } else { 2263 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2264 // constant 2265 uint32_t imm_h[2]; 2266 imm_h[0] = imm32 & 0xffff; 2267 imm_h[1] = ((imm32 >> 16) & 0xffff); 2268 if (imm_h[0] == 0) { 2269 movzw(dst, imm_h[1], 16); 2270 } else if (imm_h[0] == 0xffff) { 2271 movnw(dst, imm_h[1] ^ 0xffff, 16); 2272 } else if (imm_h[1] == 0) { 2273 movzw(dst, imm_h[0], 0); 2274 } else if (imm_h[1] == 0xffff) { 2275 movnw(dst, imm_h[0] ^ 0xffff, 0); 2276 } else { 2277 // use a MOVZ and MOVK (makes it easier to debug) 2278 movzw(dst, imm_h[0], 0); 2279 movkw(dst, imm_h[1], 16); 2280 } 2281 } 2282 } 2283 2284 // Form an address from base + offset in Rd. Rd may or may 2285 // not actually be used: you must use the Address that is returned. 2286 // It is up to you to ensure that the shift provided matches the size 2287 // of your data. 
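// Illustrative example (offset value assumed): a byte_offset of 0x123450 with
// shift == 3 is too large for a scaled 12-bit immediate, but it is
// non-negative and 8-byte aligned, so the two-part path below emits
// add(Rd, base, 0x120000) (a valid add/sub immediate) and returns
// Address(Rd, 0x3450), which does fit the scaled load/store immediate field.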
2288 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2289 if (Address::offset_ok_for_immed(byte_offset, shift)) 2290 // It fits; no need for any heroics 2291 return Address(base, byte_offset); 2292 2293 // Don't do anything clever with negative or misaligned offsets 2294 unsigned mask = (1 << shift) - 1; 2295 if (byte_offset < 0 || byte_offset & mask) { 2296 mov(Rd, byte_offset); 2297 add(Rd, base, Rd); 2298 return Address(Rd); 2299 } 2300 2301 // See if we can do this with two 12-bit offsets 2302 { 2303 uint64_t word_offset = byte_offset >> shift; 2304 uint64_t masked_offset = word_offset & 0xfff000; 2305 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2306 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2307 add(Rd, base, masked_offset << shift); 2308 word_offset -= masked_offset; 2309 return Address(Rd, word_offset << shift); 2310 } 2311 } 2312 2313 // Do it the hard way 2314 mov(Rd, byte_offset); 2315 add(Rd, base, Rd); 2316 return Address(Rd); 2317 } 2318 2319 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2320 bool want_remainder, Register scratch) 2321 { 2322 // Full implementation of Java idiv and irem. The function 2323 // returns the (pc) offset of the div instruction - may be needed 2324 // for implicit exceptions. 2325 // 2326 // constraint : ra/rb =/= scratch 2327 // normal case 2328 // 2329 // input : ra: dividend 2330 // rb: divisor 2331 // 2332 // result: either 2333 // quotient (= ra idiv rb) 2334 // remainder (= ra irem rb) 2335 2336 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2337 2338 int idivl_offset = offset(); 2339 if (! want_remainder) { 2340 sdivw(result, ra, rb); 2341 } else { 2342 sdivw(scratch, ra, rb); 2343 Assembler::msubw(result, scratch, rb, ra); 2344 } 2345 2346 return idivl_offset; 2347 } 2348 2349 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2350 bool want_remainder, Register scratch) 2351 { 2352 // Full implementation of Java ldiv and lrem. The function 2353 // returns the (pc) offset of the div instruction - may be needed 2354 // for implicit exceptions. 2355 // 2356 // constraint : ra/rb =/= scratch 2357 // normal case 2358 // 2359 // input : ra: dividend 2360 // rb: divisor 2361 // 2362 // result: either 2363 // quotient (= ra idiv rb) 2364 // remainder (= ra irem rb) 2365 2366 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2367 2368 int idivq_offset = offset(); 2369 if (! want_remainder) { 2370 sdiv(result, ra, rb); 2371 } else { 2372 sdiv(scratch, ra, rb); 2373 Assembler::msub(result, scratch, rb, ra); 2374 } 2375 2376 return idivq_offset; 2377 } 2378 2379 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2380 address prev = pc() - NativeMembar::instruction_size; 2381 address last = code()->last_insn(); 2382 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2383 NativeMembar *bar = NativeMembar_at(prev); 2384 if (AlwaysMergeDMB) { 2385 bar->set_kind(bar->get_kind() | order_constraint); 2386 BLOCK_COMMENT("merged membar(always)"); 2387 return; 2388 } 2389 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because 2390 // doing so would introduce a StoreLoad which the caller did not 2391 // intend 2392 if (bar->get_kind() == order_constraint 2393 || bar->get_kind() == AnyAny 2394 || order_constraint == AnyAny) { 2395 // We are merging two memory barrier instructions. 
On AArch64 we 2396 // can do this simply by ORing them together. 2397 bar->set_kind(bar->get_kind() | order_constraint); 2398 BLOCK_COMMENT("merged membar"); 2399 return; 2400 } else { 2401 // A special case like "DMB ST;DMB LD;DMB ST": the last DMB can be skipped. 2402 // We need to check the last two instructions. 2403 address prev2 = prev - NativeMembar::instruction_size; 2404 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) { 2405 NativeMembar *bar2 = NativeMembar_at(prev2); 2406 assert(bar2->get_kind() == order_constraint, "it should be merged before"); 2407 BLOCK_COMMENT("merged membar(elided)"); 2408 return; 2409 } 2410 } 2411 } 2412 code()->set_last_insn(pc()); 2413 dmb(Assembler::barrier(order_constraint)); 2414 } 2415 2416 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 2417 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 2418 merge_ldst(rt, adr, size_in_bytes, is_store); 2419 code()->clear_last_insn(); 2420 return true; 2421 } else { 2422 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 2423 const uint64_t mask = size_in_bytes - 1; 2424 if (adr.getMode() == Address::base_plus_offset && 2425 (adr.offset() & mask) == 0) { // only supports base_plus_offset. 2426 code()->set_last_insn(pc()); 2427 } 2428 return false; 2429 } 2430 } 2431 2432 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2433 // We always try to merge two adjacent loads into one ldp. 2434 if (!try_merge_ldst(Rx, adr, 8, false)) { 2435 Assembler::ldr(Rx, adr); 2436 } 2437 } 2438 2439 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2440 // We always try to merge two adjacent loads into one ldp. 2441 if (!try_merge_ldst(Rw, adr, 4, false)) { 2442 Assembler::ldrw(Rw, adr); 2443 } 2444 } 2445 2446 void MacroAssembler::str(Register Rx, const Address &adr) { 2447 // We always try to merge two adjacent stores into one stp. 2448 if (!try_merge_ldst(Rx, adr, 8, true)) { 2449 Assembler::str(Rx, adr); 2450 } 2451 } 2452 2453 void MacroAssembler::strw(Register Rw, const Address &adr) { 2454 // We always try to merge two adjacent stores into one stp. 2455 if (!try_merge_ldst(Rw, adr, 4, true)) { 2456 Assembler::strw(Rw, adr); 2457 } 2458 } 2459 2460 // MacroAssembler routines found actually to be needed 2461 2462 void MacroAssembler::push(Register src) 2463 { 2464 str(src, Address(pre(esp, -1 * wordSize))); 2465 } 2466 2467 void MacroAssembler::pop(Register dst) 2468 { 2469 ldr(dst, Address(post(esp, 1 * wordSize))); 2470 } 2471 2472 // Note: load_unsigned_short used to be called load_unsigned_word.
2473 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2474 int off = offset(); 2475 ldrh(dst, src); 2476 return off; 2477 } 2478 2479 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2480 int off = offset(); 2481 ldrb(dst, src); 2482 return off; 2483 } 2484 2485 int MacroAssembler::load_signed_short(Register dst, Address src) { 2486 int off = offset(); 2487 ldrsh(dst, src); 2488 return off; 2489 } 2490 2491 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2492 int off = offset(); 2493 ldrsb(dst, src); 2494 return off; 2495 } 2496 2497 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2498 int off = offset(); 2499 ldrshw(dst, src); 2500 return off; 2501 } 2502 2503 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2504 int off = offset(); 2505 ldrsbw(dst, src); 2506 return off; 2507 } 2508 2509 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2510 switch (size_in_bytes) { 2511 case 8: ldr(dst, src); break; 2512 case 4: ldrw(dst, src); break; 2513 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2514 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2515 default: ShouldNotReachHere(); 2516 } 2517 } 2518 2519 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2520 switch (size_in_bytes) { 2521 case 8: str(src, dst); break; 2522 case 4: strw(src, dst); break; 2523 case 2: strh(src, dst); break; 2524 case 1: strb(src, dst); break; 2525 default: ShouldNotReachHere(); 2526 } 2527 } 2528 2529 void MacroAssembler::decrementw(Register reg, int value) 2530 { 2531 if (value < 0) { incrementw(reg, -value); return; } 2532 if (value == 0) { return; } 2533 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2534 /* else */ { 2535 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2536 movw(rscratch2, (unsigned)value); 2537 subw(reg, reg, rscratch2); 2538 } 2539 } 2540 2541 void MacroAssembler::decrement(Register reg, int value) 2542 { 2543 if (value < 0) { increment(reg, -value); return; } 2544 if (value == 0) { return; } 2545 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2546 /* else */ { 2547 assert(reg != rscratch2, "invalid dst for register decrement"); 2548 mov(rscratch2, (uint64_t)value); 2549 sub(reg, reg, rscratch2); 2550 } 2551 } 2552 2553 void MacroAssembler::decrementw(Address dst, int value) 2554 { 2555 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2556 if (dst.getMode() == Address::literal) { 2557 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2558 lea(rscratch2, dst); 2559 dst = Address(rscratch2); 2560 } 2561 ldrw(rscratch1, dst); 2562 decrementw(rscratch1, value); 2563 strw(rscratch1, dst); 2564 } 2565 2566 void MacroAssembler::decrement(Address dst, int value) 2567 { 2568 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2569 if (dst.getMode() == Address::literal) { 2570 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2571 lea(rscratch2, dst); 2572 dst = Address(rscratch2); 2573 } 2574 ldr(rscratch1, dst); 2575 decrement(rscratch1, value); 2576 str(rscratch1, dst); 2577 } 2578 2579 void MacroAssembler::incrementw(Register reg, int value) 2580 { 2581 if (value < 0) { decrementw(reg, -value); return; } 2582 if (value == 0) { return; } 2583 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2584 /* 
else */ { 2585 assert(reg != rscratch2, "invalid dst for register increment"); 2586 movw(rscratch2, (unsigned)value); 2587 addw(reg, reg, rscratch2); 2588 } 2589 } 2590 2591 void MacroAssembler::increment(Register reg, int value) 2592 { 2593 if (value < 0) { decrement(reg, -value); return; } 2594 if (value == 0) { return; } 2595 if (value < (1 << 12)) { add(reg, reg, value); return; } 2596 /* else */ { 2597 assert(reg != rscratch2, "invalid dst for register increment"); 2598 movw(rscratch2, (unsigned)value); 2599 add(reg, reg, rscratch2); 2600 } 2601 } 2602 2603 void MacroAssembler::incrementw(Address dst, int value) 2604 { 2605 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2606 if (dst.getMode() == Address::literal) { 2607 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2608 lea(rscratch2, dst); 2609 dst = Address(rscratch2); 2610 } 2611 ldrw(rscratch1, dst); 2612 incrementw(rscratch1, value); 2613 strw(rscratch1, dst); 2614 } 2615 2616 void MacroAssembler::increment(Address dst, int value) 2617 { 2618 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2619 if (dst.getMode() == Address::literal) { 2620 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2621 lea(rscratch2, dst); 2622 dst = Address(rscratch2); 2623 } 2624 ldr(rscratch1, dst); 2625 increment(rscratch1, value); 2626 str(rscratch1, dst); 2627 } 2628 2629 // Push lots of registers in the bit set supplied. Don't push sp. 2630 // Return the number of words pushed 2631 int MacroAssembler::push(unsigned int bitset, Register stack) { 2632 int words_pushed = 0; 2633 2634 // Scan bitset to accumulate register pairs 2635 unsigned char regs[32]; 2636 int count = 0; 2637 for (int reg = 0; reg <= 30; reg++) { 2638 if (1 & bitset) 2639 regs[count++] = reg; 2640 bitset >>= 1; 2641 } 2642 regs[count++] = zr->raw_encoding(); 2643 count &= ~1; // Only push an even number of regs 2644 2645 if (count) { 2646 stp(as_Register(regs[0]), as_Register(regs[1]), 2647 Address(pre(stack, -count * wordSize))); 2648 words_pushed += 2; 2649 } 2650 for (int i = 2; i < count; i += 2) { 2651 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2652 Address(stack, i * wordSize)); 2653 words_pushed += 2; 2654 } 2655 2656 assert(words_pushed == count, "oops, pushed != count"); 2657 2658 return count; 2659 } 2660 2661 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2662 int words_pushed = 0; 2663 2664 // Scan bitset to accumulate register pairs 2665 unsigned char regs[32]; 2666 int count = 0; 2667 for (int reg = 0; reg <= 30; reg++) { 2668 if (1 & bitset) 2669 regs[count++] = reg; 2670 bitset >>= 1; 2671 } 2672 regs[count++] = zr->raw_encoding(); 2673 count &= ~1; 2674 2675 for (int i = 2; i < count; i += 2) { 2676 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2677 Address(stack, i * wordSize)); 2678 words_pushed += 2; 2679 } 2680 if (count) { 2681 ldp(as_Register(regs[0]), as_Register(regs[1]), 2682 Address(post(stack, count * wordSize))); 2683 words_pushed += 2; 2684 } 2685 2686 assert(words_pushed == count, "oops, pushed != count"); 2687 2688 return count; 2689 } 2690 2691 // Push lots of registers in the bit set supplied. Don't push sp. 
2692 // Return the number of dwords pushed 2693 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2694 int words_pushed = 0; 2695 bool use_sve = false; 2696 int sve_vector_size_in_bytes = 0; 2697 2698 #ifdef COMPILER2 2699 use_sve = Matcher::supports_scalable_vector(); 2700 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2701 #endif 2702 2703 // Scan bitset to accumulate register pairs 2704 unsigned char regs[32]; 2705 int count = 0; 2706 for (int reg = 0; reg <= 31; reg++) { 2707 if (1 & bitset) 2708 regs[count++] = reg; 2709 bitset >>= 1; 2710 } 2711 2712 if (count == 0) { 2713 return 0; 2714 } 2715 2716 if (mode == PushPopFull) { 2717 if (use_sve && sve_vector_size_in_bytes > 16) { 2718 mode = PushPopSVE; 2719 } else { 2720 mode = PushPopNeon; 2721 } 2722 } 2723 2724 #ifndef PRODUCT 2725 { 2726 char buffer[48]; 2727 if (mode == PushPopSVE) { 2728 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2729 } else if (mode == PushPopNeon) { 2730 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2731 } else { 2732 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2733 } 2734 block_comment(buffer); 2735 } 2736 #endif 2737 2738 if (mode == PushPopSVE) { 2739 sub(stack, stack, sve_vector_size_in_bytes * count); 2740 for (int i = 0; i < count; i++) { 2741 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2742 } 2743 return count * sve_vector_size_in_bytes / 8; 2744 } 2745 2746 if (mode == PushPopNeon) { 2747 if (count == 1) { 2748 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2749 return 2; 2750 } 2751 2752 bool odd = (count & 1) == 1; 2753 int push_slots = count + (odd ? 1 : 0); 2754 2755 // Always pushing full 128 bit registers. 2756 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2757 words_pushed += 2; 2758 2759 for (int i = 2; i + 1 < count; i += 2) { 2760 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2761 words_pushed += 2; 2762 } 2763 2764 if (odd) { 2765 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2766 words_pushed++; 2767 } 2768 2769 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2770 return count * 2; 2771 } 2772 2773 if (mode == PushPopFp) { 2774 bool odd = (count & 1) == 1; 2775 int push_slots = count + (odd ? 
1 : 0); 2776 2777 if (count == 1) { 2778 // Stack pointer must be 16 bytes aligned 2779 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2780 return 1; 2781 } 2782 2783 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2784 words_pushed += 2; 2785 2786 for (int i = 2; i + 1 < count; i += 2) { 2787 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2788 words_pushed += 2; 2789 } 2790 2791 if (odd) { 2792 // Stack pointer must be 16 bytes aligned 2793 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2794 words_pushed++; 2795 } 2796 2797 assert(words_pushed == count, "oops, pushed != count"); 2798 2799 return count; 2800 } 2801 2802 return 0; 2803 } 2804 2805 // Return the number of dwords popped 2806 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2807 int words_pushed = 0; 2808 bool use_sve = false; 2809 int sve_vector_size_in_bytes = 0; 2810 2811 #ifdef COMPILER2 2812 use_sve = Matcher::supports_scalable_vector(); 2813 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2814 #endif 2815 // Scan bitset to accumulate register pairs 2816 unsigned char regs[32]; 2817 int count = 0; 2818 for (int reg = 0; reg <= 31; reg++) { 2819 if (1 & bitset) 2820 regs[count++] = reg; 2821 bitset >>= 1; 2822 } 2823 2824 if (count == 0) { 2825 return 0; 2826 } 2827 2828 if (mode == PushPopFull) { 2829 if (use_sve && sve_vector_size_in_bytes > 16) { 2830 mode = PushPopSVE; 2831 } else { 2832 mode = PushPopNeon; 2833 } 2834 } 2835 2836 #ifndef PRODUCT 2837 { 2838 char buffer[48]; 2839 if (mode == PushPopSVE) { 2840 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 2841 } else if (mode == PushPopNeon) { 2842 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 2843 } else { 2844 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 2845 } 2846 block_comment(buffer); 2847 } 2848 #endif 2849 2850 if (mode == PushPopSVE) { 2851 for (int i = count - 1; i >= 0; i--) { 2852 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2853 } 2854 add(stack, stack, sve_vector_size_in_bytes * count); 2855 return count * sve_vector_size_in_bytes / 8; 2856 } 2857 2858 if (mode == PushPopNeon) { 2859 if (count == 1) { 2860 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2861 return 2; 2862 } 2863 2864 bool odd = (count & 1) == 1; 2865 int push_slots = count + (odd ? 1 : 0); 2866 2867 if (odd) { 2868 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2869 words_pushed++; 2870 } 2871 2872 for (int i = 2; i + 1 < count; i += 2) { 2873 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2874 words_pushed += 2; 2875 } 2876 2877 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2878 words_pushed += 2; 2879 2880 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2881 2882 return count * 2; 2883 } 2884 2885 if (mode == PushPopFp) { 2886 bool odd = (count & 1) == 1; 2887 int push_slots = count + (odd ? 
1 : 0); 2888 2889 if (count == 1) { 2890 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 2891 return 1; 2892 } 2893 2894 if (odd) { 2895 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2896 words_pushed++; 2897 } 2898 2899 for (int i = 2; i + 1 < count; i += 2) { 2900 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2901 words_pushed += 2; 2902 } 2903 2904 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 2905 words_pushed += 2; 2906 2907 assert(words_pushed == count, "oops, pushed != count"); 2908 2909 return count; 2910 } 2911 2912 return 0; 2913 } 2914 2915 // Return the number of dwords pushed 2916 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2917 bool use_sve = false; 2918 int sve_predicate_size_in_slots = 0; 2919 2920 #ifdef COMPILER2 2921 use_sve = Matcher::supports_scalable_vector(); 2922 if (use_sve) { 2923 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2924 } 2925 #endif 2926 2927 if (!use_sve) { 2928 return 0; 2929 } 2930 2931 unsigned char regs[PRegister::number_of_registers]; 2932 int count = 0; 2933 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2934 if (1 & bitset) 2935 regs[count++] = reg; 2936 bitset >>= 1; 2937 } 2938 2939 if (count == 0) { 2940 return 0; 2941 } 2942 2943 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2944 VMRegImpl::stack_slot_size * count, 16); 2945 sub(stack, stack, total_push_bytes); 2946 for (int i = 0; i < count; i++) { 2947 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2948 } 2949 return total_push_bytes / 8; 2950 } 2951 2952 // Return the number of dwords popped 2953 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2954 bool use_sve = false; 2955 int sve_predicate_size_in_slots = 0; 2956 2957 #ifdef COMPILER2 2958 use_sve = Matcher::supports_scalable_vector(); 2959 if (use_sve) { 2960 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2961 } 2962 #endif 2963 2964 if (!use_sve) { 2965 return 0; 2966 } 2967 2968 unsigned char regs[PRegister::number_of_registers]; 2969 int count = 0; 2970 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2971 if (1 & bitset) 2972 regs[count++] = reg; 2973 bitset >>= 1; 2974 } 2975 2976 if (count == 0) { 2977 return 0; 2978 } 2979 2980 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2981 VMRegImpl::stack_slot_size * count, 16); 2982 for (int i = count - 1; i >= 0; i--) { 2983 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2984 } 2985 add(stack, stack, total_pop_bytes); 2986 return total_pop_bytes / 8; 2987 } 2988 2989 #ifdef ASSERT 2990 void MacroAssembler::verify_heapbase(const char* msg) { 2991 #if 0 2992 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2993 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2994 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2995 // rheapbase is allocated as general register 2996 return; 2997 } 2998 if (CheckCompressedOops) { 2999 Label ok; 3000 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 3001 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3002 br(Assembler::EQ, ok); 3003 stop(msg); 3004 bind(ok); 3005 pop(1 << rscratch1->encoding(), sp); 3006 } 3007 #endif 3008 } 3009 #endif 3010 3011 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 
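  // Sketch of the decoding below (tag values per the STATIC_ASSERTs in this
  // function and in resolve_global_jobject): a null jobject is returned
  // as-is; an untagged value is a local handle and is loaded directly; bit 0
  // set marks a weak global handle and bit 1 set marks a global handle, and
  // in both tagged cases the tag is subtracted from the pointer before the
  // oop is loaded.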
3012 assert_different_registers(value, tmp1, tmp2); 3013 Label done, tagged, weak_tagged; 3014 3015 cbz(value, done); // Use null as-is. 3016 tst(value, JNIHandles::tag_mask); // Test for tag. 3017 br(Assembler::NE, tagged); 3018 3019 // Resolve local handle 3020 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3021 verify_oop(value); 3022 b(done); 3023 3024 bind(tagged); 3025 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3026 tbnz(value, 0, weak_tagged); // Test for weak tag. 3027 3028 // Resolve global handle 3029 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3030 verify_oop(value); 3031 b(done); 3032 3033 bind(weak_tagged); 3034 // Resolve jweak. 3035 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3036 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3037 verify_oop(value); 3038 3039 bind(done); 3040 } 3041 3042 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3043 assert_different_registers(value, tmp1, tmp2); 3044 Label done; 3045 3046 cbz(value, done); // Use null as-is. 3047 3048 #ifdef ASSERT 3049 { 3050 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3051 Label valid_global_tag; 3052 tbnz(value, 1, valid_global_tag); // Test for global tag 3053 stop("non global jobject using resolve_global_jobject"); 3054 bind(valid_global_tag); 3055 } 3056 #endif 3057 3058 // Resolve global handle 3059 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3060 verify_oop(value); 3061 3062 bind(done); 3063 } 3064 3065 void MacroAssembler::stop(const char* msg) { 3066 BLOCK_COMMENT(msg); 3067 // load msg into r0 so we can access it from the signal handler 3068 // ExternalAddress enables saving and restoring via the code cache 3069 lea(c_rarg0, ExternalAddress((address) msg)); 3070 dcps1(0xdeae); 3071 SCCache::add_C_string(msg); 3072 } 3073 3074 void MacroAssembler::unimplemented(const char* what) { 3075 const char* buf = nullptr; 3076 { 3077 ResourceMark rm; 3078 stringStream ss; 3079 ss.print("unimplemented: %s", what); 3080 buf = code_string(ss.as_string()); 3081 } 3082 stop(buf); 3083 } 3084 3085 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3086 #ifdef ASSERT 3087 Label OK; 3088 br(cc, OK); 3089 stop(msg); 3090 bind(OK); 3091 #endif 3092 } 3093 3094 // If a constant does not fit in an immediate field, generate some 3095 // number of MOV instructions and then perform the operation. 3096 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3097 add_sub_imm_insn insn1, 3098 add_sub_reg_insn insn2, 3099 bool is32) { 3100 assert(Rd != zr, "Rd = zr and not setting flags?"); 3101 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3102 if (fits) { 3103 (this->*insn1)(Rd, Rn, imm); 3104 } else { 3105 if (uabs(imm) < (1 << 24)) { 3106 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3107 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3108 } else { 3109 assert_different_registers(Rd, Rn); 3110 mov(Rd, imm); 3111 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3112 } 3113 } 3114 } 3115 3116 // Separate vsn which sets the flags. Optimisations are more restricted 3117 // because we must set the flags correctly. 
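// Illustrative example (constant assumed): "adds Rd, Rn, #0x1001" cannot be
// split the way the wrapper above splits it, into "add Rd, Rn, #0x1000;
// adds Rd, Rd, #1", because the carry and overflow flags would then describe
// only the final addition of 1. Instead the constant is moved into Rd and the
// register form of the instruction is used.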
3118 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3119 add_sub_imm_insn insn1, 3120 add_sub_reg_insn insn2, 3121 bool is32) { 3122 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3123 if (fits) { 3124 (this->*insn1)(Rd, Rn, imm); 3125 } else { 3126 assert_different_registers(Rd, Rn); 3127 assert(Rd != zr, "overflow in immediate operand"); 3128 mov(Rd, imm); 3129 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3130 } 3131 } 3132 3133 3134 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3135 if (increment.is_register()) { 3136 add(Rd, Rn, increment.as_register()); 3137 } else { 3138 add(Rd, Rn, increment.as_constant()); 3139 } 3140 } 3141 3142 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3143 if (increment.is_register()) { 3144 addw(Rd, Rn, increment.as_register()); 3145 } else { 3146 addw(Rd, Rn, increment.as_constant()); 3147 } 3148 } 3149 3150 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3151 if (decrement.is_register()) { 3152 sub(Rd, Rn, decrement.as_register()); 3153 } else { 3154 sub(Rd, Rn, decrement.as_constant()); 3155 } 3156 } 3157 3158 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3159 if (decrement.is_register()) { 3160 subw(Rd, Rn, decrement.as_register()); 3161 } else { 3162 subw(Rd, Rn, decrement.as_constant()); 3163 } 3164 } 3165 3166 void MacroAssembler::reinit_heapbase() 3167 { 3168 if (UseCompressedOops) { 3169 if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) { 3170 mov(rheapbase, CompressedOops::base()); 3171 } else { 3172 lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3173 ldr(rheapbase, Address(rheapbase)); 3174 } 3175 } 3176 } 3177 3178 // this simulates the behaviour of the x86 cmpxchg instruction using a 3179 // load linked/store conditional pair. we use the acquire/release 3180 // versions of these instructions so that we flush pending writes as 3181 // per Java semantics. 3182 3183 // n.b the x86 version assumes the old value to be compared against is 3184 // in rax and updates rax with the value located in memory if the 3185 // cmpxchg fails. we supply a register for the old value explicitly 3186 3187 // the aarch64 load linked/store conditional instructions do not 3188 // accept an offset. so, unlike x86, we must provide a plain register 3189 // to identify the memory word to be compared/exchanged rather than a 3190 // register+offset Address. 3191 3192 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3193 Label &succeed, Label *fail) { 3194 // oldv holds comparison value 3195 // newv holds value to write in exchange 3196 // addr identifies memory word to compare against/update 3197 if (UseLSE) { 3198 mov(tmp, oldv); 3199 casal(Assembler::xword, oldv, newv, addr); 3200 cmp(tmp, oldv); 3201 br(Assembler::EQ, succeed); 3202 membar(AnyAny); 3203 } else { 3204 Label retry_load, nope; 3205 prfm(Address(addr), PSTL1STRM); 3206 bind(retry_load); 3207 // flush and load exclusive from the memory location 3208 // and fail if it is not what we expect 3209 ldaxr(tmp, addr); 3210 cmp(tmp, oldv); 3211 br(Assembler::NE, nope); 3212 // if we store+flush with no intervening write tmp will be zero 3213 stlxr(tmp, newv, addr); 3214 cbzw(tmp, succeed); 3215 // retry so we only ever return after a load fails to compare 3216 // ensures we don't return a stale value after a failed write. 
3217 b(retry_load); 3218 // if the memory word differs we return it in oldv and signal a fail 3219 bind(nope); 3220 membar(AnyAny); 3221 mov(oldv, tmp); 3222 } 3223 if (fail) 3224 b(*fail); 3225 } 3226 3227 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 3228 Label &succeed, Label *fail) { 3229 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 3230 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 3231 } 3232 3233 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 3234 Label &succeed, Label *fail) { 3235 // oldv holds comparison value 3236 // newv holds value to write in exchange 3237 // addr identifies memory word to compare against/update 3238 // tmp returns 0/1 for success/failure 3239 if (UseLSE) { 3240 mov(tmp, oldv); 3241 casal(Assembler::word, oldv, newv, addr); 3242 cmp(tmp, oldv); 3243 br(Assembler::EQ, succeed); 3244 membar(AnyAny); 3245 } else { 3246 Label retry_load, nope; 3247 prfm(Address(addr), PSTL1STRM); 3248 bind(retry_load); 3249 // flush and load exclusive from the memory location 3250 // and fail if it is not what we expect 3251 ldaxrw(tmp, addr); 3252 cmp(tmp, oldv); 3253 br(Assembler::NE, nope); 3254 // if we store+flush with no intervening write tmp will be zero 3255 stlxrw(tmp, newv, addr); 3256 cbzw(tmp, succeed); 3257 // retry so we only ever return after a load fails to compare 3258 // ensures we don't return a stale value after a failed write. 3259 b(retry_load); 3260 // if the memory word differs we return it in oldv and signal a fail 3261 bind(nope); 3262 membar(AnyAny); 3263 mov(oldv, tmp); 3264 } 3265 if (fail) 3266 b(*fail); 3267 } 3268 3269 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3270 // doesn't retry and may fail spuriously. If the oldval is wanted, 3271 // Pass a register for the result, otherwise pass noreg. 3272 3273 // Clobbers rscratch1 3274 void MacroAssembler::cmpxchg(Register addr, Register expected, 3275 Register new_val, 3276 enum operand_size size, 3277 bool acquire, bool release, 3278 bool weak, 3279 Register result) { 3280 if (result == noreg) result = rscratch1; 3281 BLOCK_COMMENT("cmpxchg {"); 3282 if (UseLSE) { 3283 mov(result, expected); 3284 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3285 compare_eq(result, expected, size); 3286 #ifdef ASSERT 3287 // Poison rscratch1 which is written on !UseLSE branch 3288 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3289 #endif 3290 } else { 3291 Label retry_load, done; 3292 prfm(Address(addr), PSTL1STRM); 3293 bind(retry_load); 3294 load_exclusive(result, addr, size, acquire); 3295 compare_eq(result, expected, size); 3296 br(Assembler::NE, done); 3297 store_exclusive(rscratch1, new_val, addr, size, release); 3298 if (weak) { 3299 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3300 } else { 3301 cbnzw(rscratch1, retry_load); 3302 } 3303 bind(done); 3304 } 3305 BLOCK_COMMENT("} cmpxchg"); 3306 } 3307 3308 // A generic comparison. Only compares for equality, clobbers rscratch1. 
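// There is no halfword or byte compare instruction, so for those sizes the
// code below XORs the two registers into rscratch1 and tests only the low 16
// or 8 bits with ands; Z is then set iff those bits match. Illustrative
// example (values assumed): rm == 0x12345678 and rn == 0xABCD5678 compare
// equal at halfword size, since the XOR is 0xB9F90000 and its low 16 bits
// are zero.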
3309 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3310 if (size == xword) { 3311 cmp(rm, rn); 3312 } else if (size == word) { 3313 cmpw(rm, rn); 3314 } else if (size == halfword) { 3315 eorw(rscratch1, rm, rn); 3316 ands(zr, rscratch1, 0xffff); 3317 } else if (size == byte) { 3318 eorw(rscratch1, rm, rn); 3319 ands(zr, rscratch1, 0xff); 3320 } else { 3321 ShouldNotReachHere(); 3322 } 3323 } 3324 3325 3326 static bool different(Register a, RegisterOrConstant b, Register c) { 3327 if (b.is_constant()) 3328 return a != c; 3329 else 3330 return a != b.as_register() && a != c && b.as_register() != c; 3331 } 3332 3333 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3334 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3335 if (UseLSE) { \ 3336 prev = prev->is_valid() ? prev : zr; \ 3337 if (incr.is_register()) { \ 3338 AOP(sz, incr.as_register(), prev, addr); \ 3339 } else { \ 3340 mov(rscratch2, incr.as_constant()); \ 3341 AOP(sz, rscratch2, prev, addr); \ 3342 } \ 3343 return; \ 3344 } \ 3345 Register result = rscratch2; \ 3346 if (prev->is_valid()) \ 3347 result = different(prev, incr, addr) ? prev : rscratch2; \ 3348 \ 3349 Label retry_load; \ 3350 prfm(Address(addr), PSTL1STRM); \ 3351 bind(retry_load); \ 3352 LDXR(result, addr); \ 3353 OP(rscratch1, result, incr); \ 3354 STXR(rscratch2, rscratch1, addr); \ 3355 cbnzw(rscratch2, retry_load); \ 3356 if (prev->is_valid() && prev != result) { \ 3357 IOP(prev, rscratch1, incr); \ 3358 } \ 3359 } 3360 3361 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3362 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3363 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3364 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3365 3366 #undef ATOMIC_OP 3367 3368 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3369 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3370 if (UseLSE) { \ 3371 prev = prev->is_valid() ? prev : zr; \ 3372 AOP(sz, newv, prev, addr); \ 3373 return; \ 3374 } \ 3375 Register result = rscratch2; \ 3376 if (prev->is_valid()) \ 3377 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 3378 \ 3379 Label retry_load; \ 3380 prfm(Address(addr), PSTL1STRM); \ 3381 bind(retry_load); \ 3382 LDXR(result, addr); \ 3383 STXR(rscratch1, newv, addr); \ 3384 cbnzw(rscratch1, retry_load); \ 3385 if (prev->is_valid() && prev != result) \ 3386 mov(prev, result); \ 3387 } 3388 3389 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) 3390 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) 3391 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) 3392 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) 3393 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) 3394 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) 3395 3396 #undef ATOMIC_XCHG 3397 3398 #ifndef PRODUCT 3399 extern "C" void findpc(intptr_t x); 3400 #endif 3401 3402 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 3403 { 3404 // In order to get locks to work, we need to fake a in_VM state 3405 if (ShowMessageBoxOnError ) { 3406 JavaThread* thread = JavaThread::current(); 3407 JavaThreadState saved_state = thread->thread_state(); 3408 thread->set_thread_state(_thread_in_vm); 3409 #ifndef PRODUCT 3410 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 3411 ttyLocker ttyl; 3412 BytecodeCounter::print(); 3413 } 3414 #endif 3415 if (os::message_box(msg, "Execution stopped, print registers?")) { 3416 ttyLocker ttyl; 3417 tty->print_cr(" pc = 0x%016" PRIx64, pc); 3418 #ifndef PRODUCT 3419 tty->cr(); 3420 findpc(pc); 3421 tty->cr(); 3422 #endif 3423 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]); 3424 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]); 3425 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]); 3426 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]); 3427 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]); 3428 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]); 3429 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]); 3430 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]); 3431 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]); 3432 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]); 3433 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]); 3434 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]); 3435 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]); 3436 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]); 3437 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]); 3438 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]); 3439 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]); 3440 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]); 3441 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]); 3442 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]); 3443 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]); 3444 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]); 3445 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]); 3446 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]); 3447 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]); 3448 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]); 3449 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]); 3450 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]); 3451 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]); 3452 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]); 3453 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]); 3454 BREAKPOINT; 3455 } 3456 } 3457 fatal("DEBUG MESSAGE: %s", msg); 3458 } 3459 3460 RegSet MacroAssembler::call_clobbered_gp_registers() { 3461 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2); 3462 #ifndef R18_RESERVED 3463 regs += r18_tls; 3464 #endif 3465 return regs; 3466 } 3467 3468 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { 3469 int step = 4 * 
wordSize; 3470 push(call_clobbered_gp_registers() - exclude, sp); 3471 sub(sp, sp, step); 3472 mov(rscratch1, -step); 3473 // Push v0-v7, v16-v31. 3474 for (int i = 31; i>= 4; i -= 4) { 3475 if (i <= v7->encoding() || i >= v16->encoding()) 3476 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3477 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3478 } 3479 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3480 as_FloatRegister(3), T1D, Address(sp)); 3481 } 3482 3483 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3484 for (int i = 0; i < 32; i += 4) { 3485 if (i <= v7->encoding() || i >= v16->encoding()) 3486 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3487 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3488 } 3489 3490 reinitialize_ptrue(); 3491 3492 pop(call_clobbered_gp_registers() - exclude, sp); 3493 } 3494 3495 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3496 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3497 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3498 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3499 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3500 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3501 sve_str(as_FloatRegister(i), Address(sp, i)); 3502 } 3503 } else { 3504 int step = (save_vectors ? 8 : 4) * wordSize; 3505 mov(rscratch1, -step); 3506 sub(sp, sp, step); 3507 for (int i = 28; i >= 4; i -= 4) { 3508 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3509 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3510 } 3511 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3512 } 3513 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3514 sub(sp, sp, total_predicate_in_bytes); 3515 for (int i = 0; i < PRegister::number_of_registers; i++) { 3516 sve_str(as_PRegister(i), Address(sp, i)); 3517 } 3518 } 3519 } 3520 3521 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3522 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3523 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3524 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3525 sve_ldr(as_PRegister(i), Address(sp, i)); 3526 } 3527 add(sp, sp, total_predicate_in_bytes); 3528 } 3529 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3530 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3531 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3532 } 3533 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3534 } else { 3535 int step = (restore_vectors ? 8 : 4) * wordSize; 3536 for (int i = 0; i <= 28; i += 4) 3537 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3538 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3539 } 3540 3541 // We may use predicate registers and rely on ptrue with SVE, 3542 // regardless of wide vector (> 8 bytes) used or not. 3543 if (use_sve) { 3544 reinitialize_ptrue(); 3545 } 3546 3547 // integer registers except lr & sp 3548 pop(RegSet::range(r0, r17), sp); 3549 #ifdef R18_RESERVED 3550 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3551 pop(RegSet::range(r20, r29), sp); 3552 #else 3553 pop(RegSet::range(r18_tls, r29), sp); 3554 #endif 3555 } 3556 3557 /** 3558 * Helpers for multiply_to_len(). 
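 *
 * A hedged reading of add2_with_carry() below (illustrative, not normative):
 * it forms the 128-bit sum dest_hi:dest_lo + src1 + src2, leaving the low
 * 64 bits in dest_lo and the high 64 bits in final_dest_hi, with adds/adc
 * pairs propagating the carries:
 *
 *   (carry1, dest_lo) = dest_lo + src1;   dest_hi       = dest_hi + carry1;
 *   (carry2, dest_lo) = dest_lo + src2;   final_dest_hi = dest_hi + carry2;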
3559 */ 3560 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, 3561 Register src1, Register src2) { 3562 adds(dest_lo, dest_lo, src1); 3563 adc(dest_hi, dest_hi, zr); 3564 adds(dest_lo, dest_lo, src2); 3565 adc(final_dest_hi, dest_hi, zr); 3566 } 3567 3568 // Generate an address from (r + r1 extend offset). "size" is the 3569 // size of the operand. The result may be in rscratch2. 3570 Address MacroAssembler::offsetted_address(Register r, Register r1, 3571 Address::extend ext, int offset, int size) { 3572 if (offset || (ext.shift() % size != 0)) { 3573 lea(rscratch2, Address(r, r1, ext)); 3574 return Address(rscratch2, offset); 3575 } else { 3576 return Address(r, r1, ext); 3577 } 3578 } 3579 3580 Address MacroAssembler::spill_address(int size, int offset, Register tmp) 3581 { 3582 assert(offset >= 0, "spill to negative address?"); 3583 // Offset reachable ? 3584 // Not aligned - 9 bits signed offset 3585 // Aligned - 12 bits unsigned offset shifted 3586 Register base = sp; 3587 if ((offset & (size-1)) && offset >= (1<<8)) { 3588 add(tmp, base, offset & ((1<<12)-1)); 3589 base = tmp; 3590 offset &= -1u<<12; 3591 } 3592 3593 if (offset >= (1<<12) * size) { 3594 add(tmp, base, offset & (((1<<12)-1)<<12)); 3595 base = tmp; 3596 offset &= ~(((1<<12)-1)<<12); 3597 } 3598 3599 return Address(base, offset); 3600 } 3601 3602 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) { 3603 assert(offset >= 0, "spill to negative address?"); 3604 3605 Register base = sp; 3606 3607 // An immediate offset in the range 0 to 255 which is multiplied 3608 // by the current vector or predicate register size in bytes. 3609 if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) { 3610 return Address(base, offset / sve_reg_size_in_bytes); 3611 } 3612 3613 add(tmp, base, offset); 3614 return Address(tmp); 3615 } 3616 3617 // Checks whether offset is aligned. 3618 // Returns true if it is, else false. 3619 bool MacroAssembler::merge_alignment_check(Register base, 3620 size_t size, 3621 int64_t cur_offset, 3622 int64_t prev_offset) const { 3623 if (AvoidUnalignedAccesses) { 3624 if (base == sp) { 3625 // Checks whether low offset if aligned to pair of registers. 3626 int64_t pair_mask = size * 2 - 1; 3627 int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset; 3628 return (offset & pair_mask) == 0; 3629 } else { // If base is not sp, we can't guarantee the access is aligned. 3630 return false; 3631 } 3632 } else { 3633 int64_t mask = size - 1; 3634 // Load/store pair instruction only supports element size aligned offset. 3635 return (cur_offset & mask) == 0 && (prev_offset & mask) == 0; 3636 } 3637 } 3638 3639 // Checks whether current and previous loads/stores can be merged. 3640 // Returns true if it can be merged, else false. 
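//
// Illustrative example (a sketch of the common case, not an exhaustive rule
// set): two adjacent same-size, same-base accesses such as
//
//   ldr x1, [sp, #16]
//   ldr x2, [sp, #24]
//
// are candidates for a single  ldp x1, x2, [sp, #16],  provided the first
// access is the instruction emitted immediately before this one, the offsets
// differ by exactly the access size, the low offset is within the ldp/stp
// immediate range, and (with AvoidUnalignedAccesses) it is aligned to the
// size of the register pair.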
3641 bool MacroAssembler::ldst_can_merge(Register rt, 3642 const Address &adr, 3643 size_t cur_size_in_bytes, 3644 bool is_store) const { 3645 address prev = pc() - NativeInstruction::instruction_size; 3646 address last = code()->last_insn(); 3647 3648 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) { 3649 return false; 3650 } 3651 3652 if (adr.getMode() != Address::base_plus_offset || prev != last) { 3653 return false; 3654 } 3655 3656 NativeLdSt* prev_ldst = NativeLdSt_at(prev); 3657 size_t prev_size_in_bytes = prev_ldst->size_in_bytes(); 3658 3659 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging."); 3660 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging."); 3661 3662 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) { 3663 return false; 3664 } 3665 3666 int64_t max_offset = 63 * prev_size_in_bytes; 3667 int64_t min_offset = -64 * prev_size_in_bytes; 3668 3669 assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged."); 3670 3671 // Only same base can be merged. 3672 if (adr.base() != prev_ldst->base()) { 3673 return false; 3674 } 3675 3676 int64_t cur_offset = adr.offset(); 3677 int64_t prev_offset = prev_ldst->offset(); 3678 size_t diff = abs(cur_offset - prev_offset); 3679 if (diff != prev_size_in_bytes) { 3680 return false; 3681 } 3682 3683 // Following cases can not be merged: 3684 // ldr x2, [x2, #8] 3685 // ldr x3, [x2, #16] 3686 // or: 3687 // ldr x2, [x3, #8] 3688 // ldr x2, [x3, #16] 3689 // If t1 and t2 is the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL. 3690 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) { 3691 return false; 3692 } 3693 3694 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset; 3695 // Offset range must be in ldp/stp instruction's range. 3696 if (low_offset > max_offset || low_offset < min_offset) { 3697 return false; 3698 } 3699 3700 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) { 3701 return true; 3702 } 3703 3704 return false; 3705 } 3706 3707 // Merge current load/store with previous load/store into ldp/stp. 3708 void MacroAssembler::merge_ldst(Register rt, 3709 const Address &adr, 3710 size_t cur_size_in_bytes, 3711 bool is_store) { 3712 3713 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged."); 3714 3715 Register rt_low, rt_high; 3716 address prev = pc() - NativeInstruction::instruction_size; 3717 NativeLdSt* prev_ldst = NativeLdSt_at(prev); 3718 3719 int64_t offset; 3720 3721 if (adr.offset() < prev_ldst->offset()) { 3722 offset = adr.offset(); 3723 rt_low = rt; 3724 rt_high = prev_ldst->target(); 3725 } else { 3726 offset = prev_ldst->offset(); 3727 rt_low = prev_ldst->target(); 3728 rt_high = rt; 3729 } 3730 3731 Address adr_p = Address(prev_ldst->base(), offset); 3732 // Overwrite previous generated binary. 
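// (Sketch of the mechanism: the code buffer is rewound to 'prev' below, which
// discards the previously emitted single ldr/str, and one ldp/stp covering
// both slots is emitted in its place, e.g.
//   str x1, [sp, #16]  followed by  str x2, [sp, #24]
// becomes  stp x1, x2, [sp, #16].)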
3733 code_section()->set_end(prev); 3734 3735 const size_t sz = prev_ldst->size_in_bytes(); 3736 assert(sz == 8 || sz == 4, "only supports 64/32bit merging."); 3737 if (!is_store) { 3738 BLOCK_COMMENT("merged ldr pair"); 3739 if (sz == 8) { 3740 ldp(rt_low, rt_high, adr_p); 3741 } else { 3742 ldpw(rt_low, rt_high, adr_p); 3743 } 3744 } else { 3745 BLOCK_COMMENT("merged str pair"); 3746 if (sz == 8) { 3747 stp(rt_low, rt_high, adr_p); 3748 } else { 3749 stpw(rt_low, rt_high, adr_p); 3750 } 3751 } 3752 } 3753 3754 /** 3755 * Multiply 64 bit by 64 bit first loop. 3756 */ 3757 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 3758 Register y, Register y_idx, Register z, 3759 Register carry, Register product, 3760 Register idx, Register kdx) { 3761 // 3762 // jlong carry, x[], y[], z[]; 3763 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 3764 // huge_128 product = y[idx] * x[xstart] + carry; 3765 // z[kdx] = (jlong)product; 3766 // carry = (jlong)(product >>> 64); 3767 // } 3768 // z[xstart] = carry; 3769 // 3770 3771 Label L_first_loop, L_first_loop_exit; 3772 Label L_one_x, L_one_y, L_multiply; 3773 3774 subsw(xstart, xstart, 1); 3775 br(Assembler::MI, L_one_x); 3776 3777 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt))); 3778 ldr(x_xstart, Address(rscratch1)); 3779 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian 3780 3781 bind(L_first_loop); 3782 subsw(idx, idx, 1); 3783 br(Assembler::MI, L_first_loop_exit); 3784 subsw(idx, idx, 1); 3785 br(Assembler::MI, L_one_y); 3786 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3787 ldr(y_idx, Address(rscratch1)); 3788 ror(y_idx, y_idx, 32); // convert big-endian to little-endian 3789 bind(L_multiply); 3790 3791 // AArch64 has a multiply-accumulate instruction that we can't use 3792 // here because it has no way to process carries, so we have to use 3793 // separate add and adc instructions. Bah. 3794 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product 3795 mul(product, x_xstart, y_idx); 3796 adds(product, product, carry); 3797 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product 3798 3799 subw(kdx, kdx, 2); 3800 ror(product, product, 32); // back to big-endian 3801 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong)); 3802 3803 b(L_first_loop); 3804 3805 bind(L_one_y); 3806 ldrw(y_idx, Address(y, 0)); 3807 b(L_multiply); 3808 3809 bind(L_one_x); 3810 ldrw(x_xstart, Address(x, 0)); 3811 b(L_first_loop); 3812 3813 bind(L_first_loop_exit); 3814 } 3815 3816 /** 3817 * Multiply 128 bit by 128. Unrolled inner loop. 
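 *
 * Hedged summary of the loop below (a reading of the code, not a spec): each
 * iteration consumes two 64-bit limbs of y, forms the 128-bit products
 * y[idx+1] * product_hi and y[idx] * product_hi with mul/umulh, adds the
 * corresponding words of z plus the incoming carry, and writes two 64-bit
 * results back to z; in other words, the third loop of multiply_to_len is
 * unrolled to advance four ints of y and z per trip.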
3818 * 3819 */ 3820 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3821 Register carry, Register carry2, 3822 Register idx, Register jdx, 3823 Register yz_idx1, Register yz_idx2, 3824 Register tmp, Register tmp3, Register tmp4, 3825 Register tmp6, Register product_hi) { 3826 3827 // jlong carry, x[], y[], z[]; 3828 // int kdx = ystart+1; 3829 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3830 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3831 // jlong carry2 = (jlong)(tmp3 >>> 64); 3832 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3833 // carry = (jlong)(tmp4 >>> 64); 3834 // z[kdx+idx+1] = (jlong)tmp3; 3835 // z[kdx+idx] = (jlong)tmp4; 3836 // } 3837 // idx += 2; 3838 // if (idx > 0) { 3839 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3840 // z[kdx+idx] = (jlong)yz_idx1; 3841 // carry = (jlong)(yz_idx1 >>> 64); 3842 // } 3843 // 3844 3845 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3846 3847 lsrw(jdx, idx, 2); 3848 3849 bind(L_third_loop); 3850 3851 subsw(jdx, jdx, 1); 3852 br(Assembler::MI, L_third_loop_exit); 3853 subw(idx, idx, 4); 3854 3855 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3856 3857 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3858 3859 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3860 3861 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3862 ror(yz_idx2, yz_idx2, 32); 3863 3864 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3865 3866 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3867 umulh(tmp4, product_hi, yz_idx1); 3868 3869 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3870 ror(rscratch2, rscratch2, 32); 3871 3872 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3873 umulh(carry2, product_hi, yz_idx2); 3874 3875 // propagate sum of both multiplications into carry:tmp4:tmp3 3876 adds(tmp3, tmp3, carry); 3877 adc(tmp4, tmp4, zr); 3878 adds(tmp3, tmp3, rscratch1); 3879 adcs(tmp4, tmp4, tmp); 3880 adc(carry, carry2, zr); 3881 adds(tmp4, tmp4, rscratch2); 3882 adc(carry, carry, zr); 3883 3884 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3885 ror(tmp4, tmp4, 32); 3886 stp(tmp4, tmp3, Address(tmp6, 0)); 3887 3888 b(L_third_loop); 3889 bind (L_third_loop_exit); 3890 3891 andw (idx, idx, 0x3); 3892 cbz(idx, L_post_third_loop_done); 3893 3894 Label L_check_1; 3895 subsw(idx, idx, 2); 3896 br(Assembler::MI, L_check_1); 3897 3898 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3899 ldr(yz_idx1, Address(rscratch1, 0)); 3900 ror(yz_idx1, yz_idx1, 32); 3901 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3902 umulh(tmp4, product_hi, yz_idx1); 3903 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3904 ldr(yz_idx2, Address(rscratch1, 0)); 3905 ror(yz_idx2, yz_idx2, 32); 3906 3907 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3908 3909 ror(tmp3, tmp3, 32); 3910 str(tmp3, Address(rscratch1, 0)); 3911 3912 bind (L_check_1); 3913 3914 andw (idx, idx, 0x1); 3915 subsw(idx, idx, 1); 3916 br(Assembler::MI, L_post_third_loop_done); 3917 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3918 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3919 umulh(carry2, tmp4, product_hi); 3920 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3921 3922 add2_with_carry(carry2, tmp3, tmp4, carry); 3923 3924 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3925 
extr(carry, carry2, tmp3, 32); 3926 3927 bind(L_post_third_loop_done); 3928 } 3929 3930 /** 3931 * Code for BigInteger::multiplyToLen() intrinsic. 3932 * 3933 * r0: x 3934 * r1: xlen 3935 * r2: y 3936 * r3: ylen 3937 * r4: z 3938 * r5: tmp0 3939 * r10: tmp1 3940 * r11: tmp2 3941 * r12: tmp3 3942 * r13: tmp4 3943 * r14: tmp5 3944 * r15: tmp6 3945 * r16: tmp7 3946 * 3947 */ 3948 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 3949 Register z, Register tmp0, 3950 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 3951 Register tmp5, Register tmp6, Register product_hi) { 3952 3953 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi); 3954 3955 const Register idx = tmp1; 3956 const Register kdx = tmp2; 3957 const Register xstart = tmp3; 3958 3959 const Register y_idx = tmp4; 3960 const Register carry = tmp5; 3961 const Register product = xlen; 3962 const Register x_xstart = tmp0; 3963 3964 // First Loop. 3965 // 3966 // final static long LONG_MASK = 0xffffffffL; 3967 // int xstart = xlen - 1; 3968 // int ystart = ylen - 1; 3969 // long carry = 0; 3970 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 3971 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 3972 // z[kdx] = (int)product; 3973 // carry = product >>> 32; 3974 // } 3975 // z[xstart] = (int)carry; 3976 // 3977 3978 movw(idx, ylen); // idx = ylen; 3979 addw(kdx, xlen, ylen); // kdx = xlen+ylen; 3980 mov(carry, zr); // carry = 0; 3981 3982 Label L_done; 3983 3984 movw(xstart, xlen); 3985 subsw(xstart, xstart, 1); 3986 br(Assembler::MI, L_done); 3987 3988 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 3989 3990 Label L_second_loop; 3991 cbzw(kdx, L_second_loop); 3992 3993 Label L_carry; 3994 subw(kdx, kdx, 1); 3995 cbzw(kdx, L_carry); 3996 3997 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3998 lsr(carry, carry, 32); 3999 subw(kdx, kdx, 1); 4000 4001 bind(L_carry); 4002 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 4003 4004 // Second and third (nested) loops. 
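//
// (Hedged note on register pressure in the code below: the outer-loop values
// z, ylen, x and xstart are spilled to a small stack area around each call to
// multiply_128_x_128_loop and reloaded afterwards, because the inner loop
// needs nearly every allocatable register for its partial products.)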
4005 // 4006 // for (int i = xstart-1; i >= 0; i--) { // Second loop 4007 // carry = 0; 4008 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 4009 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 4010 // (z[k] & LONG_MASK) + carry; 4011 // z[k] = (int)product; 4012 // carry = product >>> 32; 4013 // } 4014 // z[i] = (int)carry; 4015 // } 4016 // 4017 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 4018 4019 const Register jdx = tmp1; 4020 4021 bind(L_second_loop); 4022 mov(carry, zr); // carry = 0; 4023 movw(jdx, ylen); // j = ystart+1 4024 4025 subsw(xstart, xstart, 1); // i = xstart-1; 4026 br(Assembler::MI, L_done); 4027 4028 str(z, Address(pre(sp, -4 * wordSize))); 4029 4030 Label L_last_x; 4031 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4032 subsw(xstart, xstart, 1); // i = xstart-1; 4033 br(Assembler::MI, L_last_x); 4034 4035 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4036 ldr(product_hi, Address(rscratch1)); 4037 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4038 4039 Label L_third_loop_prologue; 4040 bind(L_third_loop_prologue); 4041 4042 str(ylen, Address(sp, wordSize)); 4043 stp(x, xstart, Address(sp, 2 * wordSize)); 4044 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4045 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4046 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4047 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4048 4049 addw(tmp3, xlen, 1); 4050 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4051 subsw(tmp3, tmp3, 1); 4052 br(Assembler::MI, L_done); 4053 4054 lsr(carry, carry, 32); 4055 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4056 b(L_second_loop); 4057 4058 // Next infrequent code is moved outside loops. 4059 bind(L_last_x); 4060 ldrw(product_hi, Address(x, 0)); 4061 b(L_third_loop_prologue); 4062 4063 bind(L_done); 4064 } 4065 4066 // Code for BigInteger::mulAdd intrinsic 4067 // out = r0 4068 // in = r1 4069 // offset = r2 (already out.length-offset) 4070 // len = r3 4071 // k = r4 4072 // 4073 // pseudo code from java implementation: 4074 // carry = 0; 4075 // offset = out.length-offset - 1; 4076 // for (int j=len-1; j >= 0; j--) { 4077 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4078 // out[offset--] = (int)product; 4079 // carry = product >>> 32; 4080 // } 4081 // return (int)carry; 4082 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4083 Register len, Register k) { 4084 Label LOOP, END; 4085 // pre-loop 4086 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4087 csel(out, zr, out, Assembler::EQ); 4088 br(Assembler::EQ, END); 4089 add(in, in, len, LSL, 2); // in[j+1] address 4090 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4091 mov(out, zr); // used to keep carry now 4092 BIND(LOOP); 4093 ldrw(rscratch1, Address(pre(in, -4))); 4094 madd(rscratch1, rscratch1, k, out); 4095 ldrw(rscratch2, Address(pre(offset, -4))); 4096 add(rscratch1, rscratch1, rscratch2); 4097 strw(rscratch1, Address(offset)); 4098 lsr(out, rscratch1, 32); 4099 subs(len, len, 1); 4100 br(Assembler::NE, LOOP); 4101 BIND(END); 4102 } 4103 4104 /** 4105 * Emits code to update CRC-32 with a byte value according to constants in table 4106 * 4107 * @param [in,out]crc Register containing the crc. 
4108 * @param [in]val Register containing the byte to fold into the CRC. 4109 * @param [in]table Register containing the table of crc constants. 4110 * 4111 * uint32_t crc; 4112 * val = crc_table[(val ^ crc) & 0xFF]; 4113 * crc = val ^ (crc >> 8); 4114 * 4115 */ 4116 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4117 eor(val, val, crc); 4118 andr(val, val, 0xff); 4119 ldrw(val, Address(table, val, Address::lsl(2))); 4120 eor(crc, val, crc, Assembler::LSR, 8); 4121 } 4122 4123 /** 4124 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4125 * 4126 * @param [in,out]crc Register containing the crc. 4127 * @param [in]v Register containing the 32-bit to fold into the CRC. 4128 * @param [in]table0 Register containing table 0 of crc constants. 4129 * @param [in]table1 Register containing table 1 of crc constants. 4130 * @param [in]table2 Register containing table 2 of crc constants. 4131 * @param [in]table3 Register containing table 3 of crc constants. 4132 * 4133 * uint32_t crc; 4134 * v = crc ^ v 4135 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4136 * 4137 */ 4138 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4139 Register table0, Register table1, Register table2, Register table3, 4140 bool upper) { 4141 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4142 uxtb(tmp, v); 4143 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4144 ubfx(tmp, v, 8, 8); 4145 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4146 eor(crc, crc, tmp); 4147 ubfx(tmp, v, 16, 8); 4148 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4149 eor(crc, crc, tmp); 4150 ubfx(tmp, v, 24, 8); 4151 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4152 eor(crc, crc, tmp); 4153 } 4154 4155 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4156 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4157 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4158 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4159 4160 subs(tmp0, len, 384); 4161 mvnw(crc, crc); 4162 br(Assembler::GE, CRC_by128_pre); 4163 BIND(CRC_less128); 4164 subs(len, len, 32); 4165 br(Assembler::GE, CRC_by32_loop); 4166 BIND(CRC_less32); 4167 adds(len, len, 32 - 4); 4168 br(Assembler::GE, CRC_by4_loop); 4169 adds(len, len, 4); 4170 br(Assembler::GT, CRC_by1_loop); 4171 b(L_exit); 4172 4173 BIND(CRC_by32_loop); 4174 ldp(tmp0, tmp1, Address(buf)); 4175 crc32x(crc, crc, tmp0); 4176 ldp(tmp2, tmp3, Address(buf, 16)); 4177 crc32x(crc, crc, tmp1); 4178 add(buf, buf, 32); 4179 crc32x(crc, crc, tmp2); 4180 subs(len, len, 32); 4181 crc32x(crc, crc, tmp3); 4182 br(Assembler::GE, CRC_by32_loop); 4183 cmn(len, (u1)32); 4184 br(Assembler::NE, CRC_less32); 4185 b(L_exit); 4186 4187 BIND(CRC_by4_loop); 4188 ldrw(tmp0, Address(post(buf, 4))); 4189 subs(len, len, 4); 4190 crc32w(crc, crc, tmp0); 4191 br(Assembler::GE, CRC_by4_loop); 4192 adds(len, len, 4); 4193 br(Assembler::LE, L_exit); 4194 BIND(CRC_by1_loop); 4195 ldrb(tmp0, Address(post(buf, 1))); 4196 subs(len, len, 1); 4197 crc32b(crc, crc, tmp0); 4198 br(Assembler::GT, CRC_by1_loop); 4199 b(L_exit); 4200 4201 BIND(CRC_by128_pre); 4202 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4203 4*256*sizeof(juint) + 8*sizeof(juint)); 4204 mov(crc, 0); 4205 crc32x(crc, crc, tmp0); 4206 crc32x(crc, crc, tmp1); 4207 4208 cbnz(len, CRC_less128); 4209 4210 BIND(L_exit); 
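// Note on the inversion convention (as read from this kernel): crc is
// bit-inverted on entry (the mvnw at the top) and inverted back on the single
// exit below, so the register holds ~crc while data is folded in. The CRC-32C
// kernels later in this file do not perform this inversion themselves.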
4211 mvnw(crc, crc); 4212 } 4213 4214 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4215 Register len, Register tmp0, Register tmp1, Register tmp2, 4216 Register tmp3) { 4217 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4218 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4219 4220 mvnw(crc, crc); 4221 4222 subs(len, len, 128); 4223 br(Assembler::GE, CRC_by64_pre); 4224 BIND(CRC_less64); 4225 adds(len, len, 128-32); 4226 br(Assembler::GE, CRC_by32_loop); 4227 BIND(CRC_less32); 4228 adds(len, len, 32-4); 4229 br(Assembler::GE, CRC_by4_loop); 4230 adds(len, len, 4); 4231 br(Assembler::GT, CRC_by1_loop); 4232 b(L_exit); 4233 4234 BIND(CRC_by32_loop); 4235 ldp(tmp0, tmp1, Address(post(buf, 16))); 4236 subs(len, len, 32); 4237 crc32x(crc, crc, tmp0); 4238 ldr(tmp2, Address(post(buf, 8))); 4239 crc32x(crc, crc, tmp1); 4240 ldr(tmp3, Address(post(buf, 8))); 4241 crc32x(crc, crc, tmp2); 4242 crc32x(crc, crc, tmp3); 4243 br(Assembler::GE, CRC_by32_loop); 4244 cmn(len, (u1)32); 4245 br(Assembler::NE, CRC_less32); 4246 b(L_exit); 4247 4248 BIND(CRC_by4_loop); 4249 ldrw(tmp0, Address(post(buf, 4))); 4250 subs(len, len, 4); 4251 crc32w(crc, crc, tmp0); 4252 br(Assembler::GE, CRC_by4_loop); 4253 adds(len, len, 4); 4254 br(Assembler::LE, L_exit); 4255 BIND(CRC_by1_loop); 4256 ldrb(tmp0, Address(post(buf, 1))); 4257 subs(len, len, 1); 4258 crc32b(crc, crc, tmp0); 4259 br(Assembler::GT, CRC_by1_loop); 4260 b(L_exit); 4261 4262 BIND(CRC_by64_pre); 4263 sub(buf, buf, 8); 4264 ldp(tmp0, tmp1, Address(buf, 8)); 4265 crc32x(crc, crc, tmp0); 4266 ldr(tmp2, Address(buf, 24)); 4267 crc32x(crc, crc, tmp1); 4268 ldr(tmp3, Address(buf, 32)); 4269 crc32x(crc, crc, tmp2); 4270 ldr(tmp0, Address(buf, 40)); 4271 crc32x(crc, crc, tmp3); 4272 ldr(tmp1, Address(buf, 48)); 4273 crc32x(crc, crc, tmp0); 4274 ldr(tmp2, Address(buf, 56)); 4275 crc32x(crc, crc, tmp1); 4276 ldr(tmp3, Address(pre(buf, 64))); 4277 4278 b(CRC_by64_loop); 4279 4280 align(CodeEntryAlignment); 4281 BIND(CRC_by64_loop); 4282 subs(len, len, 64); 4283 crc32x(crc, crc, tmp2); 4284 ldr(tmp0, Address(buf, 8)); 4285 crc32x(crc, crc, tmp3); 4286 ldr(tmp1, Address(buf, 16)); 4287 crc32x(crc, crc, tmp0); 4288 ldr(tmp2, Address(buf, 24)); 4289 crc32x(crc, crc, tmp1); 4290 ldr(tmp3, Address(buf, 32)); 4291 crc32x(crc, crc, tmp2); 4292 ldr(tmp0, Address(buf, 40)); 4293 crc32x(crc, crc, tmp3); 4294 ldr(tmp1, Address(buf, 48)); 4295 crc32x(crc, crc, tmp0); 4296 ldr(tmp2, Address(buf, 56)); 4297 crc32x(crc, crc, tmp1); 4298 ldr(tmp3, Address(pre(buf, 64))); 4299 br(Assembler::GE, CRC_by64_loop); 4300 4301 // post-loop 4302 crc32x(crc, crc, tmp2); 4303 crc32x(crc, crc, tmp3); 4304 4305 sub(len, len, 64); 4306 add(buf, buf, 8); 4307 cmn(len, (u1)128); 4308 br(Assembler::NE, CRC_less64); 4309 BIND(L_exit); 4310 mvnw(crc, crc); 4311 } 4312 4313 /** 4314 * @param crc register containing existing CRC (32-bit) 4315 * @param buf register pointing to input byte buffer (byte*) 4316 * @param len register containing number of bytes 4317 * @param table register that will contain address of CRC table 4318 * @param tmp scratch register 4319 */ 4320 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4321 Register table0, Register table1, Register table2, Register table3, 4322 Register tmp, Register tmp2, Register tmp3) { 4323 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4324 4325 if (UseCryptoPmullForCRC32) { 4326 
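// (Dispatch order, hedged: the crypto pmull kernel is preferred when enabled,
// then the plain CRC32-instruction kernel, and only then the table-driven
// Neon/scalar code that makes up the rest of this function.)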
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4327 return; 4328 } 4329 4330 if (UseCRC32) { 4331 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4332 return; 4333 } 4334 4335 mvnw(crc, crc); 4336 4337 { 4338 uint64_t offset; 4339 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4340 add(table0, table0, offset); 4341 } 4342 add(table1, table0, 1*256*sizeof(juint)); 4343 add(table2, table0, 2*256*sizeof(juint)); 4344 add(table3, table0, 3*256*sizeof(juint)); 4345 4346 { // Neon code start 4347 cmp(len, (u1)64); 4348 br(Assembler::LT, L_by16); 4349 eor(v16, T16B, v16, v16); 4350 4351 Label L_fold; 4352 4353 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4354 4355 ld1(v0, v1, T2D, post(buf, 32)); 4356 ld1r(v4, T2D, post(tmp, 8)); 4357 ld1r(v5, T2D, post(tmp, 8)); 4358 ld1r(v6, T2D, post(tmp, 8)); 4359 ld1r(v7, T2D, post(tmp, 8)); 4360 mov(v16, S, 0, crc); 4361 4362 eor(v0, T16B, v0, v16); 4363 sub(len, len, 64); 4364 4365 BIND(L_fold); 4366 pmull(v22, T8H, v0, v5, T8B); 4367 pmull(v20, T8H, v0, v7, T8B); 4368 pmull(v23, T8H, v0, v4, T8B); 4369 pmull(v21, T8H, v0, v6, T8B); 4370 4371 pmull2(v18, T8H, v0, v5, T16B); 4372 pmull2(v16, T8H, v0, v7, T16B); 4373 pmull2(v19, T8H, v0, v4, T16B); 4374 pmull2(v17, T8H, v0, v6, T16B); 4375 4376 uzp1(v24, T8H, v20, v22); 4377 uzp2(v25, T8H, v20, v22); 4378 eor(v20, T16B, v24, v25); 4379 4380 uzp1(v26, T8H, v16, v18); 4381 uzp2(v27, T8H, v16, v18); 4382 eor(v16, T16B, v26, v27); 4383 4384 ushll2(v22, T4S, v20, T8H, 8); 4385 ushll(v20, T4S, v20, T4H, 8); 4386 4387 ushll2(v18, T4S, v16, T8H, 8); 4388 ushll(v16, T4S, v16, T4H, 8); 4389 4390 eor(v22, T16B, v23, v22); 4391 eor(v18, T16B, v19, v18); 4392 eor(v20, T16B, v21, v20); 4393 eor(v16, T16B, v17, v16); 4394 4395 uzp1(v17, T2D, v16, v20); 4396 uzp2(v21, T2D, v16, v20); 4397 eor(v17, T16B, v17, v21); 4398 4399 ushll2(v20, T2D, v17, T4S, 16); 4400 ushll(v16, T2D, v17, T2S, 16); 4401 4402 eor(v20, T16B, v20, v22); 4403 eor(v16, T16B, v16, v18); 4404 4405 uzp1(v17, T2D, v20, v16); 4406 uzp2(v21, T2D, v20, v16); 4407 eor(v28, T16B, v17, v21); 4408 4409 pmull(v22, T8H, v1, v5, T8B); 4410 pmull(v20, T8H, v1, v7, T8B); 4411 pmull(v23, T8H, v1, v4, T8B); 4412 pmull(v21, T8H, v1, v6, T8B); 4413 4414 pmull2(v18, T8H, v1, v5, T16B); 4415 pmull2(v16, T8H, v1, v7, T16B); 4416 pmull2(v19, T8H, v1, v4, T16B); 4417 pmull2(v17, T8H, v1, v6, T16B); 4418 4419 ld1(v0, v1, T2D, post(buf, 32)); 4420 4421 uzp1(v24, T8H, v20, v22); 4422 uzp2(v25, T8H, v20, v22); 4423 eor(v20, T16B, v24, v25); 4424 4425 uzp1(v26, T8H, v16, v18); 4426 uzp2(v27, T8H, v16, v18); 4427 eor(v16, T16B, v26, v27); 4428 4429 ushll2(v22, T4S, v20, T8H, 8); 4430 ushll(v20, T4S, v20, T4H, 8); 4431 4432 ushll2(v18, T4S, v16, T8H, 8); 4433 ushll(v16, T4S, v16, T4H, 8); 4434 4435 eor(v22, T16B, v23, v22); 4436 eor(v18, T16B, v19, v18); 4437 eor(v20, T16B, v21, v20); 4438 eor(v16, T16B, v17, v16); 4439 4440 uzp1(v17, T2D, v16, v20); 4441 uzp2(v21, T2D, v16, v20); 4442 eor(v16, T16B, v17, v21); 4443 4444 ushll2(v20, T2D, v16, T4S, 16); 4445 ushll(v16, T2D, v16, T2S, 16); 4446 4447 eor(v20, T16B, v22, v20); 4448 eor(v16, T16B, v16, v18); 4449 4450 uzp1(v17, T2D, v20, v16); 4451 uzp2(v21, T2D, v20, v16); 4452 eor(v20, T16B, v17, v21); 4453 4454 shl(v16, T2D, v28, 1); 4455 shl(v17, T2D, v20, 1); 4456 4457 eor(v0, T16B, v0, v16); 4458 eor(v1, T16B, v1, v17); 4459 4460 subs(len, len, 32); 4461 br(Assembler::GE, L_fold); 4462 4463 mov(crc, 0); 4464 mov(tmp, v0, D, 0); 
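// Reduce the two folded 128-bit accumulators (v0, v1) to a scalar CRC: the
// mov above pulled lane 0 of v0 into a GPR, and each 64-bit lane is then fed
// through the table-driven update_word_crc32 32 bits at a time (lower word,
// then upper word), for all four lanes.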
4465 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4466 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4467 mov(tmp, v0, D, 1); 4468 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4469 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4470 mov(tmp, v1, D, 0); 4471 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4472 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4473 mov(tmp, v1, D, 1); 4474 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4475 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4476 4477 add(len, len, 32); 4478 } // Neon code end 4479 4480 BIND(L_by16); 4481 subs(len, len, 16); 4482 br(Assembler::GE, L_by16_loop); 4483 adds(len, len, 16-4); 4484 br(Assembler::GE, L_by4_loop); 4485 adds(len, len, 4); 4486 br(Assembler::GT, L_by1_loop); 4487 b(L_exit); 4488 4489 BIND(L_by4_loop); 4490 ldrw(tmp, Address(post(buf, 4))); 4491 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4492 subs(len, len, 4); 4493 br(Assembler::GE, L_by4_loop); 4494 adds(len, len, 4); 4495 br(Assembler::LE, L_exit); 4496 BIND(L_by1_loop); 4497 subs(len, len, 1); 4498 ldrb(tmp, Address(post(buf, 1))); 4499 update_byte_crc32(crc, tmp, table0); 4500 br(Assembler::GT, L_by1_loop); 4501 b(L_exit); 4502 4503 align(CodeEntryAlignment); 4504 BIND(L_by16_loop); 4505 subs(len, len, 16); 4506 ldp(tmp, tmp3, Address(post(buf, 16))); 4507 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4508 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4509 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4510 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4511 br(Assembler::GE, L_by16_loop); 4512 adds(len, len, 16-4); 4513 br(Assembler::GE, L_by4_loop); 4514 adds(len, len, 4); 4515 br(Assembler::GT, L_by1_loop); 4516 BIND(L_exit); 4517 mvnw(crc, crc); 4518 } 4519 4520 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4521 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4522 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4523 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4524 4525 subs(tmp0, len, 384); 4526 br(Assembler::GE, CRC_by128_pre); 4527 BIND(CRC_less128); 4528 subs(len, len, 32); 4529 br(Assembler::GE, CRC_by32_loop); 4530 BIND(CRC_less32); 4531 adds(len, len, 32 - 4); 4532 br(Assembler::GE, CRC_by4_loop); 4533 adds(len, len, 4); 4534 br(Assembler::GT, CRC_by1_loop); 4535 b(L_exit); 4536 4537 BIND(CRC_by32_loop); 4538 ldp(tmp0, tmp1, Address(buf)); 4539 crc32cx(crc, crc, tmp0); 4540 ldr(tmp2, Address(buf, 16)); 4541 crc32cx(crc, crc, tmp1); 4542 ldr(tmp3, Address(buf, 24)); 4543 crc32cx(crc, crc, tmp2); 4544 add(buf, buf, 32); 4545 subs(len, len, 32); 4546 crc32cx(crc, crc, tmp3); 4547 br(Assembler::GE, CRC_by32_loop); 4548 cmn(len, (u1)32); 4549 br(Assembler::NE, CRC_less32); 4550 b(L_exit); 4551 4552 BIND(CRC_by4_loop); 4553 ldrw(tmp0, Address(post(buf, 4))); 4554 subs(len, len, 4); 4555 crc32cw(crc, crc, tmp0); 4556 br(Assembler::GE, CRC_by4_loop); 4557 adds(len, len, 4); 4558 br(Assembler::LE, L_exit); 4559 BIND(CRC_by1_loop); 4560 ldrb(tmp0, Address(post(buf, 1))); 4561 subs(len, len, 1); 4562 crc32cb(crc, crc, tmp0); 4563 br(Assembler::GT, CRC_by1_loop); 4564 b(L_exit); 4565 
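// Large-buffer path (a sketch of the control flow below): fold 128 bytes per
// iteration with carry-less multiplies via
// kernel_crc32_common_fold_using_crypto_pmull, feed the two surviving 64-bit
// accumulators through crc32cx, then branch back to the small-size loops
// above for whatever tail remains.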
4566 BIND(CRC_by128_pre); 4567 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4568 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4569 mov(crc, 0); 4570 crc32cx(crc, crc, tmp0); 4571 crc32cx(crc, crc, tmp1); 4572 4573 cbnz(len, CRC_less128); 4574 4575 BIND(L_exit); 4576 } 4577 4578 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4579 Register len, Register tmp0, Register tmp1, Register tmp2, 4580 Register tmp3) { 4581 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4582 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4583 4584 subs(len, len, 128); 4585 br(Assembler::GE, CRC_by64_pre); 4586 BIND(CRC_less64); 4587 adds(len, len, 128-32); 4588 br(Assembler::GE, CRC_by32_loop); 4589 BIND(CRC_less32); 4590 adds(len, len, 32-4); 4591 br(Assembler::GE, CRC_by4_loop); 4592 adds(len, len, 4); 4593 br(Assembler::GT, CRC_by1_loop); 4594 b(L_exit); 4595 4596 BIND(CRC_by32_loop); 4597 ldp(tmp0, tmp1, Address(post(buf, 16))); 4598 subs(len, len, 32); 4599 crc32cx(crc, crc, tmp0); 4600 ldr(tmp2, Address(post(buf, 8))); 4601 crc32cx(crc, crc, tmp1); 4602 ldr(tmp3, Address(post(buf, 8))); 4603 crc32cx(crc, crc, tmp2); 4604 crc32cx(crc, crc, tmp3); 4605 br(Assembler::GE, CRC_by32_loop); 4606 cmn(len, (u1)32); 4607 br(Assembler::NE, CRC_less32); 4608 b(L_exit); 4609 4610 BIND(CRC_by4_loop); 4611 ldrw(tmp0, Address(post(buf, 4))); 4612 subs(len, len, 4); 4613 crc32cw(crc, crc, tmp0); 4614 br(Assembler::GE, CRC_by4_loop); 4615 adds(len, len, 4); 4616 br(Assembler::LE, L_exit); 4617 BIND(CRC_by1_loop); 4618 ldrb(tmp0, Address(post(buf, 1))); 4619 subs(len, len, 1); 4620 crc32cb(crc, crc, tmp0); 4621 br(Assembler::GT, CRC_by1_loop); 4622 b(L_exit); 4623 4624 BIND(CRC_by64_pre); 4625 sub(buf, buf, 8); 4626 ldp(tmp0, tmp1, Address(buf, 8)); 4627 crc32cx(crc, crc, tmp0); 4628 ldr(tmp2, Address(buf, 24)); 4629 crc32cx(crc, crc, tmp1); 4630 ldr(tmp3, Address(buf, 32)); 4631 crc32cx(crc, crc, tmp2); 4632 ldr(tmp0, Address(buf, 40)); 4633 crc32cx(crc, crc, tmp3); 4634 ldr(tmp1, Address(buf, 48)); 4635 crc32cx(crc, crc, tmp0); 4636 ldr(tmp2, Address(buf, 56)); 4637 crc32cx(crc, crc, tmp1); 4638 ldr(tmp3, Address(pre(buf, 64))); 4639 4640 b(CRC_by64_loop); 4641 4642 align(CodeEntryAlignment); 4643 BIND(CRC_by64_loop); 4644 subs(len, len, 64); 4645 crc32cx(crc, crc, tmp2); 4646 ldr(tmp0, Address(buf, 8)); 4647 crc32cx(crc, crc, tmp3); 4648 ldr(tmp1, Address(buf, 16)); 4649 crc32cx(crc, crc, tmp0); 4650 ldr(tmp2, Address(buf, 24)); 4651 crc32cx(crc, crc, tmp1); 4652 ldr(tmp3, Address(buf, 32)); 4653 crc32cx(crc, crc, tmp2); 4654 ldr(tmp0, Address(buf, 40)); 4655 crc32cx(crc, crc, tmp3); 4656 ldr(tmp1, Address(buf, 48)); 4657 crc32cx(crc, crc, tmp0); 4658 ldr(tmp2, Address(buf, 56)); 4659 crc32cx(crc, crc, tmp1); 4660 ldr(tmp3, Address(pre(buf, 64))); 4661 br(Assembler::GE, CRC_by64_loop); 4662 4663 // post-loop 4664 crc32cx(crc, crc, tmp2); 4665 crc32cx(crc, crc, tmp3); 4666 4667 sub(len, len, 64); 4668 add(buf, buf, 8); 4669 cmn(len, (u1)128); 4670 br(Assembler::NE, CRC_less64); 4671 BIND(L_exit); 4672 } 4673 4674 /** 4675 * @param crc register containing existing CRC (32-bit) 4676 * @param buf register pointing to input byte buffer (byte*) 4677 * @param len register containing number of bytes 4678 * @param table register that will contain address of CRC table 4679 * @param tmp scratch register 4680 */ 4681 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4682 Register table0, Register table1, Register table2, Register table3, 4683 Register tmp, Register tmp2, Register tmp3) { 4684 if (UseCryptoPmullForCRC32) { 4685 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4686 } else { 4687 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4688 } 4689 } 4690 4691 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4692 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4693 Label CRC_by128_loop; 4694 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4695 4696 sub(len, len, 256); 4697 Register table = tmp0; 4698 { 4699 uint64_t offset; 4700 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4701 add(table, table, offset); 4702 } 4703 add(table, table, table_offset); 4704 4705 // Registers v0..v7 are used as data registers. 4706 // Registers v16..v31 are used as tmp registers. 4707 sub(buf, buf, 0x10); 4708 ldrq(v0, Address(buf, 0x10)); 4709 ldrq(v1, Address(buf, 0x20)); 4710 ldrq(v2, Address(buf, 0x30)); 4711 ldrq(v3, Address(buf, 0x40)); 4712 ldrq(v4, Address(buf, 0x50)); 4713 ldrq(v5, Address(buf, 0x60)); 4714 ldrq(v6, Address(buf, 0x70)); 4715 ldrq(v7, Address(pre(buf, 0x80))); 4716 4717 movi(v31, T4S, 0); 4718 mov(v31, S, 0, crc); 4719 eor(v0, T16B, v0, v31); 4720 4721 // Register v16 contains constants from the crc table. 4722 ldrq(v16, Address(table)); 4723 b(CRC_by128_loop); 4724 4725 align(OptoLoopAlignment); 4726 BIND(CRC_by128_loop); 4727 pmull (v17, T1Q, v0, v16, T1D); 4728 pmull2(v18, T1Q, v0, v16, T2D); 4729 ldrq(v0, Address(buf, 0x10)); 4730 eor3(v0, T16B, v17, v18, v0); 4731 4732 pmull (v19, T1Q, v1, v16, T1D); 4733 pmull2(v20, T1Q, v1, v16, T2D); 4734 ldrq(v1, Address(buf, 0x20)); 4735 eor3(v1, T16B, v19, v20, v1); 4736 4737 pmull (v21, T1Q, v2, v16, T1D); 4738 pmull2(v22, T1Q, v2, v16, T2D); 4739 ldrq(v2, Address(buf, 0x30)); 4740 eor3(v2, T16B, v21, v22, v2); 4741 4742 pmull (v23, T1Q, v3, v16, T1D); 4743 pmull2(v24, T1Q, v3, v16, T2D); 4744 ldrq(v3, Address(buf, 0x40)); 4745 eor3(v3, T16B, v23, v24, v3); 4746 4747 pmull (v25, T1Q, v4, v16, T1D); 4748 pmull2(v26, T1Q, v4, v16, T2D); 4749 ldrq(v4, Address(buf, 0x50)); 4750 eor3(v4, T16B, v25, v26, v4); 4751 4752 pmull (v27, T1Q, v5, v16, T1D); 4753 pmull2(v28, T1Q, v5, v16, T2D); 4754 ldrq(v5, Address(buf, 0x60)); 4755 eor3(v5, T16B, v27, v28, v5); 4756 4757 pmull (v29, T1Q, v6, v16, T1D); 4758 pmull2(v30, T1Q, v6, v16, T2D); 4759 ldrq(v6, Address(buf, 0x70)); 4760 eor3(v6, T16B, v29, v30, v6); 4761 4762 // Reuse registers v23, v24. 4763 // Using them won't block the first instruction of the next iteration. 4764 pmull (v23, T1Q, v7, v16, T1D); 4765 pmull2(v24, T1Q, v7, v16, T2D); 4766 ldrq(v7, Address(pre(buf, 0x80))); 4767 eor3(v7, T16B, v23, v24, v7); 4768 4769 subs(len, len, 0x80); 4770 br(Assembler::GE, CRC_by128_loop); 4771 4772 // fold into 512 bits 4773 // Use v31 for constants because v16 can be still in use. 
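// (Hedged description of the folding step below: each pmull/pmull2 + eor3
// triple multiplies one 128-bit accumulator by a precomputed folding constant
// loaded from the table, producing two 128-bit partial products that are
// xor-combined into an accumulator nearer the end of the buffer; this is the
// usual carry-less folding reduction, taking the working set from 1024 bits
// down to 512 here.)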
4774 ldrq(v31, Address(table, 0x10)); 4775 4776 pmull (v17, T1Q, v0, v31, T1D); 4777 pmull2(v18, T1Q, v0, v31, T2D); 4778 eor3(v0, T16B, v17, v18, v4); 4779 4780 pmull (v19, T1Q, v1, v31, T1D); 4781 pmull2(v20, T1Q, v1, v31, T2D); 4782 eor3(v1, T16B, v19, v20, v5); 4783 4784 pmull (v21, T1Q, v2, v31, T1D); 4785 pmull2(v22, T1Q, v2, v31, T2D); 4786 eor3(v2, T16B, v21, v22, v6); 4787 4788 pmull (v23, T1Q, v3, v31, T1D); 4789 pmull2(v24, T1Q, v3, v31, T2D); 4790 eor3(v3, T16B, v23, v24, v7); 4791 4792 // fold into 128 bits 4793 // Use v17 for constants because v31 can be still in use. 4794 ldrq(v17, Address(table, 0x20)); 4795 pmull (v25, T1Q, v0, v17, T1D); 4796 pmull2(v26, T1Q, v0, v17, T2D); 4797 eor3(v3, T16B, v3, v25, v26); 4798 4799 // Use v18 for constants because v17 can be still in use. 4800 ldrq(v18, Address(table, 0x30)); 4801 pmull (v27, T1Q, v1, v18, T1D); 4802 pmull2(v28, T1Q, v1, v18, T2D); 4803 eor3(v3, T16B, v3, v27, v28); 4804 4805 // Use v19 for constants because v18 can be still in use. 4806 ldrq(v19, Address(table, 0x40)); 4807 pmull (v29, T1Q, v2, v19, T1D); 4808 pmull2(v30, T1Q, v2, v19, T2D); 4809 eor3(v0, T16B, v3, v29, v30); 4810 4811 add(len, len, 0x80); 4812 add(buf, buf, 0x10); 4813 4814 mov(tmp0, v0, D, 0); 4815 mov(tmp1, v0, D, 1); 4816 } 4817 4818 SkipIfEqual::SkipIfEqual( 4819 MacroAssembler* masm, const bool* flag_addr, bool value) { 4820 _masm = masm; 4821 uint64_t offset; 4822 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4823 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4824 if (value) { 4825 _masm->cbnzw(rscratch1, _label); 4826 } else { 4827 _masm->cbzw(rscratch1, _label); 4828 } 4829 } 4830 4831 SkipIfEqual::~SkipIfEqual() { 4832 _masm->bind(_label); 4833 } 4834 4835 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4836 Address adr; 4837 switch(dst.getMode()) { 4838 case Address::base_plus_offset: 4839 // This is the expected mode, although we allow all the other 4840 // forms below. 
4841 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4842 break; 4843 default: 4844 lea(rscratch2, dst); 4845 adr = Address(rscratch2); 4846 break; 4847 } 4848 ldr(rscratch1, adr); 4849 add(rscratch1, rscratch1, src); 4850 str(rscratch1, adr); 4851 } 4852 4853 void MacroAssembler::cmpptr(Register src1, Address src2) { 4854 uint64_t offset; 4855 adrp(rscratch1, src2, offset); 4856 ldr(rscratch1, Address(rscratch1, offset)); 4857 cmp(src1, rscratch1); 4858 } 4859 4860 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4861 cmp(obj1, obj2); 4862 } 4863 4864 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4865 load_method_holder(rresult, rmethod); 4866 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4867 } 4868 4869 void MacroAssembler::load_method_holder(Register holder, Register method) { 4870 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4871 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4872 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4873 } 4874 4875 void MacroAssembler::load_klass(Register dst, Register src) { 4876 if (UseCompressedClassPointers) { 4877 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4878 decode_klass_not_null(dst); 4879 } else { 4880 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4881 } 4882 } 4883 4884 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 4885 if (RestoreMXCSROnJNICalls) { 4886 Label OK; 4887 get_fpcr(tmp1); 4888 mov(tmp2, tmp1); 4889 // Set FPCR to the state we need. We do want Round to Nearest. We 4890 // don't want non-IEEE rounding modes or floating-point traps. 4891 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 4892 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 4893 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 4894 eor(tmp2, tmp1, tmp2); 4895 cbz(tmp2, OK); // Only reset FPCR if it's wrong 4896 set_fpcr(tmp1); 4897 bind(OK); 4898 } 4899 } 4900 4901 // ((OopHandle)result).resolve(); 4902 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4903 // OopHandle::resolve is an indirection. 4904 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4905 } 4906 4907 // ((WeakHandle)result).resolve(); 4908 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4909 assert_different_registers(result, tmp1, tmp2); 4910 Label resolved; 4911 4912 // A null weak handle resolves to null. 4913 cbz(result, resolved); 4914 4915 // Only 64 bit platforms support GCs that require a tmp register 4916 // WeakHandle::resolve is an indirection like jweak. 
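// (Hedged note: ON_PHANTOM_OOP_REF asks the GC barrier to perform this load
// with phantom reference strength, so a collector that has already cleared
// the referent may legitimately produce null from this access.)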
4917 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4918 result, Address(result), tmp1, tmp2); 4919 bind(resolved); 4920 } 4921 4922 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) { 4923 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 4924 ldr(dst, Address(rmethod, Method::const_offset())); 4925 ldr(dst, Address(dst, ConstMethod::constants_offset())); 4926 ldr(dst, Address(dst, ConstantPool::pool_holder_offset())); 4927 ldr(dst, Address(dst, mirror_offset)); 4928 resolve_oop_handle(dst, tmp1, tmp2); 4929 } 4930 4931 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { 4932 if (UseCompressedClassPointers) { 4933 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4934 if (CompressedKlassPointers::base() == nullptr) { 4935 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); 4936 return; 4937 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 4938 && CompressedKlassPointers::shift() == 0) { 4939 // Only the bottom 32 bits matter 4940 cmpw(trial_klass, tmp); 4941 return; 4942 } 4943 decode_klass_not_null(tmp); 4944 } else { 4945 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4946 } 4947 cmp(trial_klass, tmp); 4948 } 4949 4950 void MacroAssembler::store_klass(Register dst, Register src) { 4951 // FIXME: Should this be a store release? concurrent gcs assumes 4952 // klass length is valid if klass field is not null. 4953 if (UseCompressedClassPointers) { 4954 encode_klass_not_null(src); 4955 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4956 } else { 4957 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4958 } 4959 } 4960 4961 void MacroAssembler::store_klass_gap(Register dst, Register src) { 4962 if (UseCompressedClassPointers) { 4963 // Store to klass gap in destination 4964 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 4965 } 4966 } 4967 4968 // Algorithm must match CompressedOops::encode. 4969 void MacroAssembler::encode_heap_oop(Register d, Register s) { 4970 #ifdef ASSERT 4971 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 4972 #endif 4973 verify_oop_msg(s, "broken oop in encode_heap_oop"); 4974 if (CompressedOops::base() == nullptr) { 4975 if (CompressedOops::shift() != 0) { 4976 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4977 lsr(d, s, LogMinObjAlignmentInBytes); 4978 } else { 4979 mov(d, s); 4980 } 4981 } else { 4982 subs(d, s, rheapbase); 4983 csel(d, d, zr, Assembler::HS); 4984 lsr(d, d, LogMinObjAlignmentInBytes); 4985 4986 /* Old algorithm: is this any worse? 
4987 Label nonnull; 4988 cbnz(r, nonnull); 4989 sub(r, r, rheapbase); 4990 bind(nonnull); 4991 lsr(r, r, LogMinObjAlignmentInBytes); 4992 */ 4993 } 4994 } 4995 4996 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4997 #ifdef ASSERT 4998 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4999 if (CheckCompressedOops) { 5000 Label ok; 5001 cbnz(r, ok); 5002 stop("null oop passed to encode_heap_oop_not_null"); 5003 bind(ok); 5004 } 5005 #endif 5006 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5007 if (CompressedOops::base() != nullptr) { 5008 sub(r, r, rheapbase); 5009 } 5010 if (CompressedOops::shift() != 0) { 5011 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5012 lsr(r, r, LogMinObjAlignmentInBytes); 5013 } 5014 } 5015 5016 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5017 #ifdef ASSERT 5018 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5019 if (CheckCompressedOops) { 5020 Label ok; 5021 cbnz(src, ok); 5022 stop("null oop passed to encode_heap_oop_not_null2"); 5023 bind(ok); 5024 } 5025 #endif 5026 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5027 5028 Register data = src; 5029 if (CompressedOops::base() != nullptr) { 5030 sub(dst, src, rheapbase); 5031 data = dst; 5032 } 5033 if (CompressedOops::shift() != 0) { 5034 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5035 lsr(dst, data, LogMinObjAlignmentInBytes); 5036 data = dst; 5037 } 5038 if (data == src) 5039 mov(dst, src); 5040 } 5041 5042 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5043 #ifdef ASSERT 5044 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5045 #endif 5046 if (CompressedOops::base() == nullptr) { 5047 if (CompressedOops::shift() != 0 || d != s) { 5048 lsl(d, s, CompressedOops::shift()); 5049 } 5050 } else { 5051 Label done; 5052 if (d != s) 5053 mov(d, s); 5054 cbz(s, done); 5055 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5056 bind(done); 5057 } 5058 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5059 } 5060 5061 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5062 assert (UseCompressedOops, "should only be used for compressed headers"); 5063 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5064 // Cannot assert, unverified entry point counts instructions (see .ad file) 5065 // vtableStubs also counts instructions in pd_code_size_limit. 5066 // Also do not verify_oop as this is called by verify_oop. 5067 if (CompressedOops::shift() != 0) { 5068 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5069 if (CompressedOops::base() != nullptr) { 5070 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5071 } else { 5072 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5073 } 5074 } else { 5075 assert (CompressedOops::base() == nullptr, "sanity"); 5076 } 5077 } 5078 5079 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5080 assert (UseCompressedOops, "should only be used for compressed headers"); 5081 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5082 // Cannot assert, unverified entry point counts instructions (see .ad file) 5083 // vtableStubs also counts instructions in pd_code_size_limit. 5084 // Also do not verify_oop as this is called by verify_oop. 
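// Worked example (values purely illustrative): with a heap base of
// 0x0000000800000000 and shift 3 (LogMinObjAlignmentInBytes), a narrow oop
// 0x00001000 decodes to dst = base + (src << 3) = 0x0000000800008000; with a
// null base the add against zr leaves just the shifted value.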
5085 if (CompressedOops::shift() != 0) { 5086 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5087 if (CompressedOops::base() != nullptr) { 5088 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5089 } else { 5090 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5091 } 5092 } else { 5093 assert (CompressedOops::base() == nullptr, "sanity"); 5094 if (dst != src) { 5095 mov(dst, src); 5096 } 5097 } 5098 } 5099 5100 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5101 5102 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5103 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5104 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5105 5106 if (_klass_decode_mode != KlassDecodeNone) { 5107 return _klass_decode_mode; 5108 } 5109 5110 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 5111 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 5112 5113 if (CompressedKlassPointers::base() == nullptr) { 5114 return (_klass_decode_mode = KlassDecodeZero); 5115 } 5116 5117 if (operand_valid_for_logical_immediate( 5118 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 5119 const size_t range = CompressedKlassPointers::klass_range_end() - CompressedKlassPointers::base(); 5120 const uint64_t range_mask = (1ULL << log2i(range)) - 1; 5121 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 5122 return (_klass_decode_mode = KlassDecodeXor); 5123 } 5124 } 5125 5126 const uint64_t shifted_base = 5127 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5128 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 5129 "compressed class base bad alignment"); 5130 5131 return (_klass_decode_mode = KlassDecodeMovk); 5132 } 5133 5134 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5135 switch (klass_decode_mode()) { 5136 case KlassDecodeZero: 5137 if (CompressedKlassPointers::shift() != 0) { 5138 lsr(dst, src, LogKlassAlignmentInBytes); 5139 } else { 5140 if (dst != src) mov(dst, src); 5141 } 5142 break; 5143 5144 case KlassDecodeXor: 5145 if (CompressedKlassPointers::shift() != 0) { 5146 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5147 lsr(dst, dst, LogKlassAlignmentInBytes); 5148 } else { 5149 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5150 } 5151 break; 5152 5153 case KlassDecodeMovk: 5154 if (CompressedKlassPointers::shift() != 0) { 5155 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 5156 } else { 5157 movw(dst, src); 5158 } 5159 break; 5160 5161 case KlassDecodeNone: 5162 ShouldNotReachHere(); 5163 break; 5164 } 5165 } 5166 5167 void MacroAssembler::encode_klass_not_null(Register r) { 5168 encode_klass_not_null(r, r); 5169 } 5170 5171 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5172 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5173 5174 switch (klass_decode_mode()) { 5175 case KlassDecodeZero: 5176 if (CompressedKlassPointers::shift() != 0) { 5177 lsl(dst, src, LogKlassAlignmentInBytes); 5178 } else { 5179 if (dst != src) mov(dst, src); 5180 } 5181 break; 5182 5183 case KlassDecodeXor: 5184 if (CompressedKlassPointers::shift() != 0) { 5185 lsl(dst, src, LogKlassAlignmentInBytes); 5186 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5187 } else { 5188 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5189 } 5190 break; 5191 5192 case 
KlassDecodeMovk: { 5193 const uint64_t shifted_base = 5194 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5195 5196 if (dst != src) movw(dst, src); 5197 movk(dst, shifted_base >> 32, 32); 5198 5199 if (CompressedKlassPointers::shift() != 0) { 5200 lsl(dst, dst, LogKlassAlignmentInBytes); 5201 } 5202 5203 break; 5204 } 5205 5206 case KlassDecodeNone: 5207 ShouldNotReachHere(); 5208 break; 5209 } 5210 } 5211 5212 void MacroAssembler::decode_klass_not_null(Register r) { 5213 decode_klass_not_null(r, r); 5214 } 5215 5216 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5217 #ifdef ASSERT 5218 { 5219 ThreadInVMfromUnknown tiv; 5220 assert (UseCompressedOops, "should only be used for compressed oops"); 5221 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5222 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5223 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5224 } 5225 #endif 5226 int oop_index = oop_recorder()->find_index(obj); 5227 InstructionMark im(this); 5228 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5229 code_section()->relocate(inst_mark(), rspec); 5230 movz(dst, 0xDEAD, 16); 5231 movk(dst, 0xBEEF); 5232 } 5233 5234 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5235 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5236 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5237 int index = oop_recorder()->find_index(k); 5238 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5239 5240 InstructionMark im(this); 5241 RelocationHolder rspec = metadata_Relocation::spec(index); 5242 code_section()->relocate(inst_mark(), rspec); 5243 narrowKlass nk = CompressedKlassPointers::encode(k); 5244 movz(dst, (nk >> 16), 16); 5245 movk(dst, nk & 0xffff); 5246 } 5247 5248 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5249 Register dst, Address src, 5250 Register tmp1, Register tmp2) { 5251 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5252 decorators = AccessInternal::decorator_fixup(decorators, type); 5253 bool as_raw = (decorators & AS_RAW) != 0; 5254 if (as_raw) { 5255 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5256 } else { 5257 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5258 } 5259 } 5260 5261 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5262 Address dst, Register val, 5263 Register tmp1, Register tmp2, Register tmp3) { 5264 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5265 decorators = AccessInternal::decorator_fixup(decorators, type); 5266 bool as_raw = (decorators & AS_RAW) != 0; 5267 if (as_raw) { 5268 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5269 } else { 5270 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5271 } 5272 } 5273 5274 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5275 Register tmp2, DecoratorSet decorators) { 5276 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5277 } 5278 5279 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5280 Register tmp2, DecoratorSet decorators) { 5281 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5282 } 5283 5284 void MacroAssembler::store_heap_oop(Address dst, 
Register val, Register tmp1, 5285 Register tmp2, Register tmp3, DecoratorSet decorators) { 5286 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5287 } 5288 5289 // Used for storing nulls. 5290 void MacroAssembler::store_heap_oop_null(Address dst) { 5291 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5292 } 5293 5294 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5295 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5296 int index = oop_recorder()->allocate_metadata_index(obj); 5297 RelocationHolder rspec = metadata_Relocation::spec(index); 5298 return Address((address)obj, rspec); 5299 } 5300 5301 // Move an oop into a register. 5302 void MacroAssembler::movoop(Register dst, jobject obj) { 5303 int oop_index; 5304 if (obj == nullptr) { 5305 oop_index = oop_recorder()->allocate_oop_index(obj); 5306 } else { 5307 #ifdef ASSERT 5308 { 5309 ThreadInVMfromUnknown tiv; 5310 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5311 } 5312 #endif 5313 oop_index = oop_recorder()->find_index(obj); 5314 } 5315 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5316 5317 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5318 mov(dst, Address((address)obj, rspec)); 5319 } else { 5320 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5321 ldr_constant(dst, Address(dummy, rspec)); 5322 } 5323 5324 } 5325 5326 // Move a metadata address into a register. 5327 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5328 int oop_index; 5329 if (obj == nullptr) { 5330 oop_index = oop_recorder()->allocate_metadata_index(obj); 5331 } else { 5332 oop_index = oop_recorder()->find_index(obj); 5333 } 5334 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5335 mov(dst, Address((address)obj, rspec)); 5336 } 5337 5338 Address MacroAssembler::constant_oop_address(jobject obj) { 5339 #ifdef ASSERT 5340 { 5341 ThreadInVMfromUnknown tiv; 5342 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5343 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5344 } 5345 #endif 5346 int oop_index = oop_recorder()->find_index(obj); 5347 return Address((address)obj, oop_Relocation::spec(oop_index)); 5348 } 5349 5350 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
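// The TLAB bump-pointer fast path itself is emitted by the active BarrierSetAssembler;
// if the allocation cannot be satisfied, control branches to slow_case.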
5351 void MacroAssembler::tlab_allocate(Register obj, 5352 Register var_size_in_bytes, 5353 int con_size_in_bytes, 5354 Register t1, 5355 Register t2, 5356 Label& slow_case) { 5357 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5358 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5359 } 5360 5361 void MacroAssembler::verify_tlab() { 5362 #ifdef ASSERT 5363 if (UseTLAB && VerifyOops) { 5364 Label next, ok; 5365 5366 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5367 5368 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5369 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5370 cmp(rscratch2, rscratch1); 5371 br(Assembler::HS, next); 5372 STOP("assert(top >= start)"); 5373 should_not_reach_here(); 5374 5375 bind(next); 5376 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5377 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5378 cmp(rscratch2, rscratch1); 5379 br(Assembler::HS, ok); 5380 STOP("assert(top <= end)"); 5381 should_not_reach_here(); 5382 5383 bind(ok); 5384 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5385 } 5386 #endif 5387 } 5388 5389 // Writes to stack successive pages until offset reached to check for 5390 // stack overflow + shadow pages. This clobbers tmp. 5391 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5392 assert_different_registers(tmp, size, rscratch1); 5393 mov(tmp, sp); 5394 // Bang stack for total size given plus shadow page size. 5395 // Bang one page at a time because large size can bang beyond yellow and 5396 // red zones. 5397 Label loop; 5398 mov(rscratch1, (int)os::vm_page_size()); 5399 bind(loop); 5400 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5401 subsw(size, size, rscratch1); 5402 str(size, Address(tmp)); 5403 br(Assembler::GT, loop); 5404 5405 // Bang down shadow pages too. 5406 // At this point, (tmp-0) is the last address touched, so don't 5407 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5408 // was post-decremented.) Skip this address by starting at i=1, and 5409 // touch a few more pages below. N.B. It is important to touch all 5410 // the way down to and including i=StackShadowPages. 5411 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 5412 // this could be any sized move but this is can be a debugging crumb 5413 // so the bigger the better. 5414 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5415 str(size, Address(tmp)); 5416 } 5417 } 5418 5419 // Move the address of the polling page into dest. 5420 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5421 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5422 } 5423 5424 // Read the polling page. The address of the polling page must 5425 // already be in r. 
5426 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5427 address mark; 5428 { 5429 InstructionMark im(this); 5430 code_section()->relocate(inst_mark(), rtype); 5431 ldrw(zr, Address(r, 0)); 5432 mark = inst_mark(); 5433 } 5434 verify_cross_modify_fence_not_required(); 5435 return mark; 5436 } 5437 5438 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5439 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5440 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5441 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5442 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5443 int64_t offset_low = dest_page - low_page; 5444 int64_t offset_high = dest_page - high_page; 5445 5446 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5447 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5448 5449 InstructionMark im(this); 5450 code_section()->relocate(inst_mark(), dest.rspec()); 5451 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5452 // the code cache so that if it is relocated we know it will still reach 5453 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5454 _adrp(reg1, dest.target()); 5455 } else { 5456 uint64_t target = (uint64_t)dest.target(); 5457 uint64_t adrp_target 5458 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5459 5460 _adrp(reg1, (address)adrp_target); 5461 movk(reg1, target >> 32, 32); 5462 } 5463 byte_offset = (uint64_t)dest.target() & 0xfff; 5464 } 5465 5466 void MacroAssembler::load_byte_map_base(Register reg) { 5467 CardTable::CardValue* byte_map_base = 5468 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5469 5470 // Strictly speaking the byte_map_base isn't an address at all, and it might 5471 // even be negative. It is thus materialised as a constant. 
5472 #if INCLUDE_CDS 5473 if (SCCache::is_on_for_write()) { 5474 // SCA needs relocation info for card table base 5475 lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base))); 5476 } else { 5477 #endif 5478 mov(reg, (uint64_t)byte_map_base); 5479 #if INCLUDE_CDS 5480 } 5481 #endif 5482 } 5483 5484 void MacroAssembler::load_aotrc_address(Register reg, address a) { 5485 #if INCLUDE_CDS 5486 assert(AOTRuntimeConstants::contains(a), "address out of range for data area"); 5487 if (SCCache::is_on_for_write()) { 5488 // all aotrc field addresses should be registered in the SCC address table 5489 lea(reg, ExternalAddress(a)); 5490 } else { 5491 mov(reg, (uint64_t)a); 5492 } 5493 #else 5494 ShouldNotReachHere(); 5495 #endif 5496 } 5497 5498 void MacroAssembler::build_frame(int framesize) { 5499 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5500 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5501 protect_return_address(); 5502 if (framesize < ((1 << 9) + 2 * wordSize)) { 5503 sub(sp, sp, framesize); 5504 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5505 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 5506 } else { 5507 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 5508 if (PreserveFramePointer) mov(rfp, sp); 5509 if (framesize < ((1 << 12) + 2 * wordSize)) 5510 sub(sp, sp, framesize - 2 * wordSize); 5511 else { 5512 mov(rscratch1, framesize - 2 * wordSize); 5513 sub(sp, sp, rscratch1); 5514 } 5515 } 5516 verify_cross_modify_fence_not_required(); 5517 } 5518 5519 void MacroAssembler::remove_frame(int framesize) { 5520 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5521 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5522 if (framesize < ((1 << 9) + 2 * wordSize)) { 5523 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5524 add(sp, sp, framesize); 5525 } else { 5526 if (framesize < ((1 << 12) + 2 * wordSize)) 5527 add(sp, sp, framesize - 2 * wordSize); 5528 else { 5529 mov(rscratch1, framesize - 2 * wordSize); 5530 add(sp, sp, rscratch1); 5531 } 5532 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 5533 } 5534 authenticate_return_address(); 5535 } 5536 5537 5538 // This method counts leading positive bytes (highest bit not set) in provided byte array 5539 address MacroAssembler::count_positives(Register ary1, Register len, Register result) { 5540 // Simple and most common case of aligned small array which is not at the 5541 // end of memory page is placed here. All other cases are in stub. 5542 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE; 5543 const uint64_t UPPER_BIT_MASK=0x8080808080808080; 5544 assert_different_registers(ary1, len, result); 5545 5546 mov(result, len); 5547 cmpw(len, 0); 5548 br(LE, DONE); 5549 cmpw(len, 4 * wordSize); 5550 br(GE, STUB_LONG); // size > 32 then go to stub 5551 5552 int shift = 64 - exact_log2(os::vm_page_size()); 5553 lsl(rscratch1, ary1, shift); 5554 mov(rscratch2, (size_t)(4 * wordSize) << shift); 5555 adds(rscratch2, rscratch1, rscratch2); // At end of page? 
5556 br(CS, STUB); // at the end of page then go to stub 5557 subs(len, len, wordSize); 5558 br(LT, END); 5559 5560 BIND(LOOP); 5561 ldr(rscratch1, Address(post(ary1, wordSize))); 5562 tst(rscratch1, UPPER_BIT_MASK); 5563 br(NE, SET_RESULT); 5564 subs(len, len, wordSize); 5565 br(GE, LOOP); 5566 cmpw(len, -wordSize); 5567 br(EQ, DONE); 5568 5569 BIND(END); 5570 ldr(rscratch1, Address(ary1)); 5571 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5572 lslv(rscratch1, rscratch1, rscratch2); 5573 tst(rscratch1, UPPER_BIT_MASK); 5574 br(NE, SET_RESULT); 5575 b(DONE); 5576 5577 BIND(STUB); 5578 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5579 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5580 address tpc1 = trampoline_call(count_pos); 5581 if (tpc1 == nullptr) { 5582 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5583 postcond(pc() == badAddress); 5584 return nullptr; 5585 } 5586 b(DONE); 5587 5588 BIND(STUB_LONG); 5589 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5590 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5591 address tpc2 = trampoline_call(count_pos_long); 5592 if (tpc2 == nullptr) { 5593 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5594 postcond(pc() == badAddress); 5595 return nullptr; 5596 } 5597 b(DONE); 5598 5599 BIND(SET_RESULT); 5600 5601 add(len, len, wordSize); 5602 sub(result, result, len); 5603 5604 BIND(DONE); 5605 postcond(pc() != badAddress); 5606 return pc(); 5607 } 5608 5609 // Clobbers: rscratch1, rscratch2, rflags 5610 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5611 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5612 Register tmp4, Register tmp5, Register result, 5613 Register cnt1, int elem_size) { 5614 Label DONE, SAME; 5615 Register tmp1 = rscratch1; 5616 Register tmp2 = rscratch2; 5617 int elem_per_word = wordSize/elem_size; 5618 int log_elem_size = exact_log2(elem_size); 5619 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5620 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5621 int base_offset 5622 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5623 // When the length offset is not aligned to 8 bytes, 5624 // then we align it down. This is valid because the new 5625 // offset will always be the klass which is the same 5626 // for type arrays. 5627 int start_offset = align_down(length_offset, BytesPerWord); 5628 int extra_length = base_offset - start_offset; 5629 assert(start_offset == length_offset || start_offset == klass_offset, 5630 "start offset must be 8-byte-aligned or be the klass offset"); 5631 assert(base_offset != start_offset, "must include the length field"); 5632 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5633 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5634 5635 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5636 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5637 5638 #ifndef PRODUCT 5639 { 5640 const char kind = (elem_size == 2) ? 'U' : 'L'; 5641 char comment[64]; 5642 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5643 BLOCK_COMMENT(comment); 5644 } 5645 #endif 5646 5647 // if (a1 == a2) 5648 // return true; 5649 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5650 br(EQ, SAME); 5651 5652 if (UseSimpleArrayEquals) { 5653 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5654 // if (a1 == nullptr || a2 == nullptr) 5655 // return false; 5656 // a1 & a2 == 0 means (some-pointer is null) or 5657 // (very-rare-or-even-probably-impossible-pointer-values) 5658 // so, we can save one branch in most cases 5659 tst(a1, a2); 5660 mov(result, false); 5661 br(EQ, A_MIGHT_BE_NULL); 5662 // if (a1.length != a2.length) 5663 // return false; 5664 bind(A_IS_NOT_NULL); 5665 ldrw(cnt1, Address(a1, length_offset)); 5666 // Increase loop counter by diff between base- and actual start-offset. 5667 addw(cnt1, cnt1, extra_length); 5668 lea(a1, Address(a1, start_offset)); 5669 lea(a2, Address(a2, start_offset)); 5670 // Check for short strings, i.e. smaller than wordSize. 5671 subs(cnt1, cnt1, elem_per_word); 5672 br(Assembler::LT, SHORT); 5673 // Main 8 byte comparison loop. 5674 bind(NEXT_WORD); { 5675 ldr(tmp1, Address(post(a1, wordSize))); 5676 ldr(tmp2, Address(post(a2, wordSize))); 5677 subs(cnt1, cnt1, elem_per_word); 5678 eor(tmp5, tmp1, tmp2); 5679 cbnz(tmp5, DONE); 5680 } br(GT, NEXT_WORD); 5681 // Last longword. In the case where length == 4 we compare the 5682 // same longword twice, but that's still faster than another 5683 // conditional branch. 5684 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5685 // length == 4. 5686 if (log_elem_size > 0) 5687 lsl(cnt1, cnt1, log_elem_size); 5688 ldr(tmp3, Address(a1, cnt1)); 5689 ldr(tmp4, Address(a2, cnt1)); 5690 eor(tmp5, tmp3, tmp4); 5691 cbnz(tmp5, DONE); 5692 b(SAME); 5693 bind(A_MIGHT_BE_NULL); 5694 // in case both a1 and a2 are not-null, proceed with loads 5695 cbz(a1, DONE); 5696 cbz(a2, DONE); 5697 b(A_IS_NOT_NULL); 5698 bind(SHORT); 5699 5700 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5701 { 5702 ldrw(tmp1, Address(post(a1, 4))); 5703 ldrw(tmp2, Address(post(a2, 4))); 5704 eorw(tmp5, tmp1, tmp2); 5705 cbnzw(tmp5, DONE); 5706 } 5707 bind(TAIL03); 5708 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5709 { 5710 ldrh(tmp3, Address(post(a1, 2))); 5711 ldrh(tmp4, Address(post(a2, 2))); 5712 eorw(tmp5, tmp3, tmp4); 5713 cbnzw(tmp5, DONE); 5714 } 5715 bind(TAIL01); 5716 if (elem_size == 1) { // Only needed when comparing byte arrays. 5717 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5718 { 5719 ldrb(tmp1, a1); 5720 ldrb(tmp2, a2); 5721 eorw(tmp5, tmp1, tmp2); 5722 cbnzw(tmp5, DONE); 5723 } 5724 } 5725 } else { 5726 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5727 CSET_EQ, LAST_CHECK; 5728 mov(result, false); 5729 cbz(a1, DONE); 5730 ldrw(cnt1, Address(a1, length_offset)); 5731 cbz(a2, DONE); 5732 // Increase loop counter by diff between base- and actual start-offset. 
5733 addw(cnt1, cnt1, extra_length); 5734 5735 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's 5736 // faster to perform another branch before comparing a1 and a2 5737 cmp(cnt1, (u1)elem_per_word); 5738 br(LE, SHORT); // short or same 5739 ldr(tmp3, Address(pre(a1, start_offset))); 5740 subs(zr, cnt1, stubBytesThreshold); 5741 br(GE, STUB); 5742 ldr(tmp4, Address(pre(a2, start_offset))); 5743 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5744 5745 // Main 16 byte comparison loop with 2 exits 5746 bind(NEXT_DWORD); { 5747 ldr(tmp1, Address(pre(a1, wordSize))); 5748 ldr(tmp2, Address(pre(a2, wordSize))); 5749 subs(cnt1, cnt1, 2 * elem_per_word); 5750 br(LE, TAIL); 5751 eor(tmp4, tmp3, tmp4); 5752 cbnz(tmp4, DONE); 5753 ldr(tmp3, Address(pre(a1, wordSize))); 5754 ldr(tmp4, Address(pre(a2, wordSize))); 5755 cmp(cnt1, (u1)elem_per_word); 5756 br(LE, TAIL2); 5757 cmp(tmp1, tmp2); 5758 } br(EQ, NEXT_DWORD); 5759 b(DONE); 5760 5761 bind(TAIL); 5762 eor(tmp4, tmp3, tmp4); 5763 eor(tmp2, tmp1, tmp2); 5764 lslv(tmp2, tmp2, tmp5); 5765 orr(tmp5, tmp4, tmp2); 5766 cmp(tmp5, zr); 5767 b(CSET_EQ); 5768 5769 bind(TAIL2); 5770 eor(tmp2, tmp1, tmp2); 5771 cbnz(tmp2, DONE); 5772 b(LAST_CHECK); 5773 5774 bind(STUB); 5775 ldr(tmp4, Address(pre(a2, start_offset))); 5776 if (elem_size == 2) { // convert to byte counter 5777 lsl(cnt1, cnt1, 1); 5778 } 5779 eor(tmp5, tmp3, tmp4); 5780 cbnz(tmp5, DONE); 5781 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 5782 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 5783 address tpc = trampoline_call(stub); 5784 if (tpc == nullptr) { 5785 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 5786 postcond(pc() == badAddress); 5787 return nullptr; 5788 } 5789 b(DONE); 5790 5791 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 5792 // so, if a2 == null => return false(0), else return true, so we can return a2 5793 mov(result, a2); 5794 b(DONE); 5795 bind(SHORT); 5796 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5797 ldr(tmp3, Address(a1, start_offset)); 5798 ldr(tmp4, Address(a2, start_offset)); 5799 bind(LAST_CHECK); 5800 eor(tmp4, tmp3, tmp4); 5801 lslv(tmp5, tmp4, tmp5); 5802 cmp(tmp5, zr); 5803 bind(CSET_EQ); 5804 cset(result, EQ); 5805 b(DONE); 5806 } 5807 5808 bind(SAME); 5809 mov(result, true); 5810 // That's it. 5811 bind(DONE); 5812 5813 BLOCK_COMMENT("} array_equals"); 5814 postcond(pc() != badAddress); 5815 return pc(); 5816 } 5817 5818 // Compare Strings 5819 5820 // For Strings we're passed the address of the first characters in a1 5821 // and a2 and the length in cnt1. 5822 // There are two implementations. For arrays >= 8 bytes, all 5823 // comparisons (including the final one, which may overlap) are 5824 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 5825 // halfword, then a short, and then a byte. 5826 5827 void MacroAssembler::string_equals(Register a1, Register a2, 5828 Register result, Register cnt1) 5829 { 5830 Label SAME, DONE, SHORT, NEXT_WORD; 5831 Register tmp1 = rscratch1; 5832 Register tmp2 = rscratch2; 5833 Register cnt2 = tmp2; // cnt2 only used in array length compare 5834 5835 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5836 5837 #ifndef PRODUCT 5838 { 5839 char comment[64]; 5840 snprintf(comment, sizeof comment, "{string_equalsL"); 5841 BLOCK_COMMENT(comment); 5842 } 5843 #endif 5844 5845 mov(result, false); 5846 5847 // Check for short strings, i.e. smaller than wordSize. 
5848 subs(cnt1, cnt1, wordSize); 5849 br(Assembler::LT, SHORT); 5850 // Main 8 byte comparison loop. 5851 bind(NEXT_WORD); { 5852 ldr(tmp1, Address(post(a1, wordSize))); 5853 ldr(tmp2, Address(post(a2, wordSize))); 5854 subs(cnt1, cnt1, wordSize); 5855 eor(tmp1, tmp1, tmp2); 5856 cbnz(tmp1, DONE); 5857 } br(GT, NEXT_WORD); 5858 // Last longword. In the case where length == 4 we compare the 5859 // same longword twice, but that's still faster than another 5860 // conditional branch. 5861 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5862 // length == 4. 5863 ldr(tmp1, Address(a1, cnt1)); 5864 ldr(tmp2, Address(a2, cnt1)); 5865 eor(tmp2, tmp1, tmp2); 5866 cbnz(tmp2, DONE); 5867 b(SAME); 5868 5869 bind(SHORT); 5870 Label TAIL03, TAIL01; 5871 5872 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5873 { 5874 ldrw(tmp1, Address(post(a1, 4))); 5875 ldrw(tmp2, Address(post(a2, 4))); 5876 eorw(tmp1, tmp1, tmp2); 5877 cbnzw(tmp1, DONE); 5878 } 5879 bind(TAIL03); 5880 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5881 { 5882 ldrh(tmp1, Address(post(a1, 2))); 5883 ldrh(tmp2, Address(post(a2, 2))); 5884 eorw(tmp1, tmp1, tmp2); 5885 cbnzw(tmp1, DONE); 5886 } 5887 bind(TAIL01); 5888 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5889 { 5890 ldrb(tmp1, a1); 5891 ldrb(tmp2, a2); 5892 eorw(tmp1, tmp1, tmp2); 5893 cbnzw(tmp1, DONE); 5894 } 5895 // Arrays are equal. 5896 bind(SAME); 5897 mov(result, true); 5898 5899 // That's it. 5900 bind(DONE); 5901 BLOCK_COMMENT("} string_equals"); 5902 } 5903 5904 5905 // The size of the blocks erased by the zero_blocks stub. We must 5906 // handle anything smaller than this ourselves in zero_words(). 5907 const int MacroAssembler::zero_words_block_size = 8; 5908 5909 // zero_words() is used by C2 ClearArray patterns and by 5910 // C1_MacroAssembler. It is as small as possible, handling small word 5911 // counts locally and delegating anything larger to the zero_blocks 5912 // stub. It is expanded many times in compiled code, so it is 5913 // important to keep it short. 5914 5915 // ptr: Address of a buffer to be zeroed. 5916 // cnt: Count in HeapWords. 5917 // 5918 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5919 address MacroAssembler::zero_words(Register ptr, Register cnt) 5920 { 5921 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5922 5923 BLOCK_COMMENT("zero_words {"); 5924 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5925 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5926 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5927 5928 subs(rscratch1, cnt, zero_words_block_size); 5929 Label around; 5930 br(LO, around); 5931 { 5932 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5933 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5934 // Make sure this is a C2 compilation. C1 allocates space only for 5935 // trampoline stubs generated by Call LIR ops, and in any case it 5936 // makes sense for a C1 compilation task to proceed as quickly as 5937 // possible. 
5938 CompileTask* task; 5939 if (StubRoutines::aarch64::complete() 5940 && Thread::current()->is_Compiler_thread() 5941 && (task = ciEnv::current()->task()) 5942 && is_c2_compile(task->comp_level())) { 5943 address tpc = trampoline_call(zero_blocks); 5944 if (tpc == nullptr) { 5945 DEBUG_ONLY(reset_labels(around)); 5946 return nullptr; 5947 } 5948 } else { 5949 far_call(zero_blocks); 5950 } 5951 } 5952 bind(around); 5953 5954 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5955 // for us. 5956 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5957 Label l; 5958 tbz(cnt, exact_log2(i), l); 5959 for (int j = 0; j < i; j += 2) { 5960 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5961 } 5962 bind(l); 5963 } 5964 { 5965 Label l; 5966 tbz(cnt, 0, l); 5967 str(zr, Address(ptr)); 5968 bind(l); 5969 } 5970 5971 BLOCK_COMMENT("} zero_words"); 5972 return pc(); 5973 } 5974 5975 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5976 // cnt: Immediate count in HeapWords. 5977 // 5978 // r10, r11, rscratch1, and rscratch2 are clobbered. 5979 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5980 { 5981 assert(wordSize <= BlockZeroingLowLimit, 5982 "increase BlockZeroingLowLimit"); 5983 address result = nullptr; 5984 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5985 #ifndef PRODUCT 5986 { 5987 char buf[64]; 5988 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5989 BLOCK_COMMENT(buf); 5990 } 5991 #endif 5992 if (cnt >= 16) { 5993 uint64_t loops = cnt/16; 5994 if (loops > 1) { 5995 mov(rscratch2, loops - 1); 5996 } 5997 { 5998 Label loop; 5999 bind(loop); 6000 for (int i = 0; i < 16; i += 2) { 6001 stp(zr, zr, Address(base, i * BytesPerWord)); 6002 } 6003 add(base, base, 16 * BytesPerWord); 6004 if (loops > 1) { 6005 subs(rscratch2, rscratch2, 1); 6006 br(GE, loop); 6007 } 6008 } 6009 } 6010 cnt %= 16; 6011 int i = cnt & 1; // store any odd word to start 6012 if (i) str(zr, Address(base)); 6013 for (; i < (int)cnt; i += 2) { 6014 stp(zr, zr, Address(base, i * wordSize)); 6015 } 6016 BLOCK_COMMENT("} zero_words"); 6017 result = pc(); 6018 } else { 6019 mov(r10, base); mov(r11, cnt); 6020 result = zero_words(r10, r11); 6021 } 6022 return result; 6023 } 6024 6025 // Zero blocks of memory by using DC ZVA. 6026 // 6027 // Aligns the base address first sufficiently for DC ZVA, then uses 6028 // DC ZVA repeatedly for every full block. cnt is the size to be 6029 // zeroed in HeapWords. Returns the count of words left to be zeroed 6030 // in cnt. 6031 // 6032 // NOTE: This is intended to be used in the zero_blocks() stub. If 6033 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 6034 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 6035 Register tmp = rscratch1; 6036 Register tmp2 = rscratch2; 6037 int zva_length = VM_Version::zva_length(); 6038 Label initial_table_end, loop_zva; 6039 Label fini; 6040 6041 // Base must be 16 byte aligned. If not just return and let caller handle it 6042 tst(base, 0x0f); 6043 br(Assembler::NE, fini); 6044 // Align base with ZVA length. 6045 neg(tmp, base); 6046 andr(tmp, tmp, zva_length - 1); 6047 6048 // tmp: the number of bytes to be filled to align the base with ZVA length. 
6049 add(base, base, tmp); 6050 sub(cnt, cnt, tmp, Assembler::ASR, 3); 6051 adr(tmp2, initial_table_end); 6052 sub(tmp2, tmp2, tmp, Assembler::LSR, 2); 6053 br(tmp2); 6054 6055 for (int i = -zva_length + 16; i < 0; i += 16) 6056 stp(zr, zr, Address(base, i)); 6057 bind(initial_table_end); 6058 6059 sub(cnt, cnt, zva_length >> 3); 6060 bind(loop_zva); 6061 dc(Assembler::ZVA, base); 6062 subs(cnt, cnt, zva_length >> 3); 6063 add(base, base, zva_length); 6064 br(Assembler::GE, loop_zva); 6065 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA 6066 bind(fini); 6067 } 6068 6069 // base: Address of a buffer to be filled, 8 bytes aligned. 6070 // cnt: Count in 8-byte unit. 6071 // value: Value to be filled with. 6072 // base will point to the end of the buffer after filling. 6073 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 6074 { 6075 // Algorithm: 6076 // 6077 // if (cnt == 0) { 6078 // return; 6079 // } 6080 // if ((p & 8) != 0) { 6081 // *p++ = v; 6082 // } 6083 // 6084 // scratch1 = cnt & 14; 6085 // cnt -= scratch1; 6086 // p += scratch1; 6087 // switch (scratch1 / 2) { 6088 // do { 6089 // cnt -= 16; 6090 // p[-16] = v; 6091 // p[-15] = v; 6092 // case 7: 6093 // p[-14] = v; 6094 // p[-13] = v; 6095 // case 6: 6096 // p[-12] = v; 6097 // p[-11] = v; 6098 // // ... 6099 // case 1: 6100 // p[-2] = v; 6101 // p[-1] = v; 6102 // case 0: 6103 // p += 16; 6104 // } while (cnt); 6105 // } 6106 // if ((cnt & 1) == 1) { 6107 // *p++ = v; 6108 // } 6109 6110 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 6111 6112 Label fini, skip, entry, loop; 6113 const int unroll = 8; // Number of stp instructions we'll unroll 6114 6115 cbz(cnt, fini); 6116 tbz(base, 3, skip); 6117 str(value, Address(post(base, 8))); 6118 sub(cnt, cnt, 1); 6119 bind(skip); 6120 6121 andr(rscratch1, cnt, (unroll-1) * 2); 6122 sub(cnt, cnt, rscratch1); 6123 add(base, base, rscratch1, Assembler::LSL, 3); 6124 adr(rscratch2, entry); 6125 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 6126 br(rscratch2); 6127 6128 bind(loop); 6129 add(base, base, unroll * 16); 6130 for (int i = -unroll; i < 0; i++) 6131 stp(value, value, Address(base, i * 16)); 6132 bind(entry); 6133 subs(cnt, cnt, unroll * 2); 6134 br(Assembler::GE, loop); 6135 6136 tbz(cnt, 0, fini); 6137 str(value, Address(post(base, 8))); 6138 bind(fini); 6139 } 6140 6141 // Intrinsic for 6142 // 6143 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray 6144 // return the number of characters copied. 6145 // - java/lang/StringUTF16.compress 6146 // return index of non-latin1 character if copy fails, otherwise 'len'. 6147 // 6148 // This version always returns the number of characters copied, and does not 6149 // clobber the 'len' register. A successful copy will complete with the post- 6150 // condition: 'res' == 'len', while an unsuccessful copy will exit with the 6151 // post-condition: 0 <= 'res' < 'len'. 6152 // 6153 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) has proven to 6154 // degrade performance (on Ampere Altra - Neoverse N1), to an extent 6155 // beyond the acceptable, even though the footprint would be smaller. 6156 // Using 'umaxv' in the ASCII-case comes with a small penalty but does 6157 // avoid additional bloat. 
6158 // 6159 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6160 void MacroAssembler::encode_iso_array(Register src, Register dst, 6161 Register len, Register res, bool ascii, 6162 FloatRegister vtmp0, FloatRegister vtmp1, 6163 FloatRegister vtmp2, FloatRegister vtmp3, 6164 FloatRegister vtmp4, FloatRegister vtmp5) 6165 { 6166 Register cnt = res; 6167 Register max = rscratch1; 6168 Register chk = rscratch2; 6169 6170 prfm(Address(src), PLDL1STRM); 6171 movw(cnt, len); 6172 6173 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6174 6175 Label LOOP_32, DONE_32, FAIL_32; 6176 6177 BIND(LOOP_32); 6178 { 6179 cmpw(cnt, 32); 6180 br(LT, DONE_32); 6181 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6182 // Extract lower bytes. 6183 FloatRegister vlo0 = vtmp4; 6184 FloatRegister vlo1 = vtmp5; 6185 uzp1(vlo0, T16B, vtmp0, vtmp1); 6186 uzp1(vlo1, T16B, vtmp2, vtmp3); 6187 // Merge bits... 6188 orr(vtmp0, T16B, vtmp0, vtmp1); 6189 orr(vtmp2, T16B, vtmp2, vtmp3); 6190 // Extract merged upper bytes. 6191 FloatRegister vhix = vtmp0; 6192 uzp2(vhix, T16B, vtmp0, vtmp2); 6193 // ISO-check on hi-parts (all zero). 6194 // ASCII-check on lo-parts (no sign). 6195 FloatRegister vlox = vtmp1; // Merge lower bytes. 6196 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6197 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6198 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6199 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6200 ASCII(orr(chk, chk, max)); 6201 cbnz(chk, FAIL_32); 6202 subw(cnt, cnt, 32); 6203 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6204 b(LOOP_32); 6205 } 6206 BIND(FAIL_32); 6207 sub(src, src, 64); 6208 BIND(DONE_32); 6209 6210 Label LOOP_8, SKIP_8; 6211 6212 BIND(LOOP_8); 6213 { 6214 cmpw(cnt, 8); 6215 br(LT, SKIP_8); 6216 FloatRegister vhi = vtmp0; 6217 FloatRegister vlo = vtmp1; 6218 ld1(vtmp3, T8H, src); 6219 uzp1(vlo, T16B, vtmp3, vtmp3); 6220 uzp2(vhi, T16B, vtmp3, vtmp3); 6221 // ISO-check on hi-parts (all zero). 6222 // ASCII-check on lo-parts (no sign). 6223 ASCII(cm(LT, vtmp2, T16B, vlo)); 6224 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6225 ASCII(umov(max, vtmp2, B, 0)); 6226 ASCII(orr(chk, chk, max)); 6227 cbnz(chk, SKIP_8); 6228 6229 strd(vlo, Address(post(dst, 8))); 6230 subw(cnt, cnt, 8); 6231 add(src, src, 16); 6232 b(LOOP_8); 6233 } 6234 BIND(SKIP_8); 6235 6236 #undef ASCII 6237 6238 Label LOOP, DONE; 6239 6240 cbz(cnt, DONE); 6241 BIND(LOOP); 6242 { 6243 Register chr = rscratch1; 6244 ldrh(chr, Address(post(src, 2))); 6245 tst(chr, ascii ? 0xff80 : 0xff00); 6246 br(NE, DONE); 6247 strb(chr, Address(post(dst, 1))); 6248 subs(cnt, cnt, 1); 6249 br(GT, LOOP); 6250 } 6251 BIND(DONE); 6252 // Return index where we stopped. 6253 subw(res, len, cnt); 6254 } 6255 6256 // Inflate byte[] array to char[]. 6257 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6258 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6259 FloatRegister vtmp1, FloatRegister vtmp2, 6260 FloatRegister vtmp3, Register tmp4) { 6261 Label big, done, after_init, to_stub; 6262 6263 assert_different_registers(src, dst, len, tmp4, rscratch1); 6264 6265 fmovd(vtmp1, 0.0); 6266 lsrw(tmp4, len, 3); 6267 bind(after_init); 6268 cbnzw(tmp4, big); 6269 // Short string: less than 8 bytes. 6270 { 6271 Label loop, tiny; 6272 6273 cmpw(len, 4); 6274 br(LT, tiny); 6275 // Use SIMD to do 4 bytes. 
6276 ldrs(vtmp2, post(src, 4)); 6277 zip1(vtmp3, T8B, vtmp2, vtmp1); 6278 subw(len, len, 4); 6279 strd(vtmp3, post(dst, 8)); 6280 6281 cbzw(len, done); 6282 6283 // Do the remaining bytes by steam. 6284 bind(loop); 6285 ldrb(tmp4, post(src, 1)); 6286 strh(tmp4, post(dst, 2)); 6287 subw(len, len, 1); 6288 6289 bind(tiny); 6290 cbnz(len, loop); 6291 6292 b(done); 6293 } 6294 6295 if (SoftwarePrefetchHintDistance >= 0) { 6296 bind(to_stub); 6297 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6298 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6299 address tpc = trampoline_call(stub); 6300 if (tpc == nullptr) { 6301 DEBUG_ONLY(reset_labels(big, done)); 6302 postcond(pc() == badAddress); 6303 return nullptr; 6304 } 6305 b(after_init); 6306 } 6307 6308 // Unpack the bytes 8 at a time. 6309 bind(big); 6310 { 6311 Label loop, around, loop_last, loop_start; 6312 6313 if (SoftwarePrefetchHintDistance >= 0) { 6314 const int large_loop_threshold = (64 + 16)/8; 6315 ldrd(vtmp2, post(src, 8)); 6316 andw(len, len, 7); 6317 cmp(tmp4, (u1)large_loop_threshold); 6318 br(GE, to_stub); 6319 b(loop_start); 6320 6321 bind(loop); 6322 ldrd(vtmp2, post(src, 8)); 6323 bind(loop_start); 6324 subs(tmp4, tmp4, 1); 6325 br(EQ, loop_last); 6326 zip1(vtmp2, T16B, vtmp2, vtmp1); 6327 ldrd(vtmp3, post(src, 8)); 6328 st1(vtmp2, T8H, post(dst, 16)); 6329 subs(tmp4, tmp4, 1); 6330 zip1(vtmp3, T16B, vtmp3, vtmp1); 6331 st1(vtmp3, T8H, post(dst, 16)); 6332 br(NE, loop); 6333 b(around); 6334 bind(loop_last); 6335 zip1(vtmp2, T16B, vtmp2, vtmp1); 6336 st1(vtmp2, T8H, post(dst, 16)); 6337 bind(around); 6338 cbz(len, done); 6339 } else { 6340 andw(len, len, 7); 6341 bind(loop); 6342 ldrd(vtmp2, post(src, 8)); 6343 sub(tmp4, tmp4, 1); 6344 zip1(vtmp3, T16B, vtmp2, vtmp1); 6345 st1(vtmp3, T8H, post(dst, 16)); 6346 cbnz(tmp4, loop); 6347 } 6348 } 6349 6350 // Do the tail of up to 8 bytes. 6351 add(src, src, len); 6352 ldrd(vtmp3, Address(src, -8)); 6353 add(dst, dst, len, ext::uxtw, 1); 6354 zip1(vtmp3, T16B, vtmp3, vtmp1); 6355 strq(vtmp3, Address(dst, -16)); 6356 6357 bind(done); 6358 postcond(pc() != badAddress); 6359 return pc(); 6360 } 6361 6362 // Compress char[] array to byte[]. 6363 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6364 // Return the array length if every element in array can be encoded, 6365 // otherwise, the index of first non-latin1 (> 0xff) character. 6366 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6367 Register res, 6368 FloatRegister tmp0, FloatRegister tmp1, 6369 FloatRegister tmp2, FloatRegister tmp3, 6370 FloatRegister tmp4, FloatRegister tmp5) { 6371 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6372 } 6373 6374 // java.math.round(double a) 6375 // Returns the closest long to the argument, with ties rounding to 6376 // positive infinity. This requires some fiddling for corner 6377 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6378 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6379 FloatRegister ftmp) { 6380 Label DONE; 6381 BLOCK_COMMENT("java_round_double: { "); 6382 fmovd(rscratch1, src); 6383 // Use RoundToNearestTiesAway unless src small and -ve. 
6384 fcvtasd(dst, src); 6385 // Test if src >= 0 || abs(src) >= 0x1.0p52 6386 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6387 mov(rscratch2, julong_cast(0x1.0p52)); 6388 cmp(rscratch1, rscratch2); 6389 br(HS, DONE); { 6390 // src < 0 && abs(src) < 0x1.0p52 6391 // src may have a fractional part, so add 0.5 6392 fmovd(ftmp, 0.5); 6393 faddd(ftmp, src, ftmp); 6394 // Convert double to jlong, use RoundTowardsNegative 6395 fcvtmsd(dst, ftmp); 6396 } 6397 bind(DONE); 6398 BLOCK_COMMENT("} java_round_double"); 6399 } 6400 6401 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6402 FloatRegister ftmp) { 6403 Label DONE; 6404 BLOCK_COMMENT("java_round_float: { "); 6405 fmovs(rscratch1, src); 6406 // Use RoundToNearestTiesAway unless src small and -ve. 6407 fcvtassw(dst, src); 6408 // Test if src >= 0 || abs(src) >= 0x1.0p23 6409 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6410 mov(rscratch2, jint_cast(0x1.0p23f)); 6411 cmp(rscratch1, rscratch2); 6412 br(HS, DONE); { 6413 // src < 0 && |src| < 0x1.0p23 6414 // src may have a fractional part, so add 0.5 6415 fmovs(ftmp, 0.5f); 6416 fadds(ftmp, src, ftmp); 6417 // Convert float to jint, use RoundTowardsNegative 6418 fcvtmssw(dst, ftmp); 6419 } 6420 bind(DONE); 6421 BLOCK_COMMENT("} java_round_float"); 6422 } 6423 6424 // get_thread() can be called anywhere inside generated code so we 6425 // need to save whatever non-callee save context might get clobbered 6426 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6427 // the call setup code. 6428 // 6429 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6430 // On other systems, the helper is a usual C function. 6431 // 6432 void MacroAssembler::get_thread(Register dst) { 6433 RegSet saved_regs = 6434 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6435 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6436 6437 protect_return_address(); 6438 push(saved_regs, sp); 6439 6440 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6441 blr(lr); 6442 if (dst != c_rarg0) { 6443 mov(dst, c_rarg0); 6444 } 6445 6446 pop(saved_regs, sp); 6447 authenticate_return_address(); 6448 } 6449 6450 void MacroAssembler::cache_wb(Address line) { 6451 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6452 assert(line.index() == noreg, "index should be noreg"); 6453 assert(line.offset() == 0, "offset should be 0"); 6454 // would like to assert this 6455 // assert(line._ext.shift == 0, "shift should be zero"); 6456 if (VM_Version::supports_dcpop()) { 6457 // writeback using clear virtual address to point of persistence 6458 dc(Assembler::CVAP, line.base()); 6459 } else { 6460 // no need to generate anything as Unsafe.writebackMemory should 6461 // never invoke this stub 6462 } 6463 } 6464 6465 void MacroAssembler::cache_wbsync(bool is_pre) { 6466 // we only need a barrier post sync 6467 if (!is_pre) { 6468 membar(Assembler::AnyAny); 6469 } 6470 } 6471 6472 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6473 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) { 6474 return; 6475 } 6476 // Make sure that native code does not change SVE vector length. 
6477 Label verify_ok; 6478 movw(tmp, zr); 6479 sve_inc(tmp, B); 6480 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 6481 br(EQ, verify_ok); 6482 stop("Error: SVE vector length has changed since jvm startup"); 6483 bind(verify_ok); 6484 } 6485 6486 void MacroAssembler::verify_ptrue() { 6487 Label verify_ok; 6488 if (!UseSVE) { 6489 return; 6490 } 6491 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 6492 sve_dec(rscratch1, B); 6493 cbz(rscratch1, verify_ok); 6494 stop("Error: the preserved predicate register (p7) elements are not all true"); 6495 bind(verify_ok); 6496 } 6497 6498 void MacroAssembler::safepoint_isb() { 6499 isb(); 6500 #ifndef PRODUCT 6501 if (VerifyCrossModifyFence) { 6502 // Clear the thread state. 6503 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6504 } 6505 #endif 6506 } 6507 6508 #ifndef PRODUCT 6509 void MacroAssembler::verify_cross_modify_fence_not_required() { 6510 if (VerifyCrossModifyFence) { 6511 // Check if thread needs a cross modify fence. 6512 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6513 Label fence_not_required; 6514 cbz(rscratch1, fence_not_required); 6515 // If it does then fail. 6516 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure))); 6517 mov(c_rarg0, rthread); 6518 blr(rscratch1); 6519 bind(fence_not_required); 6520 } 6521 } 6522 #endif 6523 6524 void MacroAssembler::spin_wait() { 6525 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 6526 switch (VM_Version::spin_wait_desc().inst()) { 6527 case SpinWait::NOP: 6528 nop(); 6529 break; 6530 case SpinWait::ISB: 6531 isb(); 6532 break; 6533 case SpinWait::YIELD: 6534 yield(); 6535 break; 6536 default: 6537 ShouldNotReachHere(); 6538 } 6539 } 6540 } 6541 6542 // Stack frame creation/removal 6543 6544 void MacroAssembler::enter(bool strip_ret_addr) { 6545 if (strip_ret_addr) { 6546 // Addresses can only be signed once. If there are multiple nested frames being created 6547 // in the same function, then the return address needs stripping first. 6548 strip_return_address(); 6549 } 6550 protect_return_address(); 6551 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6552 mov(rfp, sp); 6553 } 6554 6555 void MacroAssembler::leave() { 6556 mov(sp, rfp); 6557 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 6558 authenticate_return_address(); 6559 } 6560 6561 // ROP Protection 6562 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/ 6563 // destroying stack frames or whenever directly loading/storing the LR to memory. 6564 // If ROP protection is not set then these functions are no-ops. 6565 // For more details on PAC see pauth_aarch64.hpp. 6566 6567 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory. 6568 // Uses value zero as the modifier. 6569 // 6570 void MacroAssembler::protect_return_address() { 6571 if (VM_Version::use_rop_protection()) { 6572 check_return_address(); 6573 paciaz(); 6574 } 6575 } 6576 6577 // Sign the return value in the given register. Use before updating the LR in the existing stack 6578 // frame for the current function. 6579 // Uses value zero as the modifier. 6580 // 6581 void MacroAssembler::protect_return_address(Register return_reg) { 6582 if (VM_Version::use_rop_protection()) { 6583 check_return_address(return_reg); 6584 paciza(return_reg); 6585 } 6586 } 6587 6588 // Authenticate the LR. 
Use before function return, after restoring FP and loading LR from memory. 6589 // Uses value zero as the modifier. 6590 // 6591 void MacroAssembler::authenticate_return_address() { 6592 if (VM_Version::use_rop_protection()) { 6593 autiaz(); 6594 check_return_address(); 6595 } 6596 } 6597 6598 // Authenticate the return value in the given register. Use before updating the LR in the existing 6599 // stack frame for the current function. 6600 // Uses value zero as the modifier. 6601 // 6602 void MacroAssembler::authenticate_return_address(Register return_reg) { 6603 if (VM_Version::use_rop_protection()) { 6604 autiza(return_reg); 6605 check_return_address(return_reg); 6606 } 6607 } 6608 6609 // Strip any PAC data from LR without performing any authentication. Use with caution - only if 6610 // there is no guaranteed way of authenticating the LR. 6611 // 6612 void MacroAssembler::strip_return_address() { 6613 if (VM_Version::use_rop_protection()) { 6614 xpaclri(); 6615 } 6616 } 6617 6618 #ifndef PRODUCT 6619 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only 6620 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point 6621 // it is difficult to debug back to the callee function. 6622 // This function simply loads from the address in the given register. 6623 // Use directly after authentication to catch authentication failures. 6624 // Also use before signing to check that the pointer is valid and hasn't already been signed. 6625 // 6626 void MacroAssembler::check_return_address(Register return_reg) { 6627 if (VM_Version::use_rop_protection()) { 6628 ldr(zr, Address(return_reg)); 6629 } 6630 } 6631 #endif 6632 6633 // The java_calling_convention describes stack locations as ideal slots on 6634 // a frame with no abi restrictions. Since we must observe abi restrictions 6635 // (like the placement of the register window) the slots must be biased by 6636 // the following value. 6637 static int reg2offset_in(VMReg r) { 6638 // Account for saved rfp and lr 6639 // This should really be in_preserve_stack_slots 6640 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; 6641 } 6642 6643 static int reg2offset_out(VMReg r) { 6644 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 6645 } 6646 6647 // On 64bit we will store integer like items to the stack as 6648 // 64bits items (AArch64 ABI) even though java would only store 6649 // 32bits for a parameter. On 32bit it will simply be 32bits 6650 // So this routine will do 32->32 on 32bit and 32->64 on 64bit 6651 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) { 6652 if (src.first()->is_stack()) { 6653 if (dst.first()->is_stack()) { 6654 // stack to stack 6655 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6656 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6657 } else { 6658 // stack to reg 6659 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6660 } 6661 } else if (dst.first()->is_stack()) { 6662 // reg to stack 6663 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6664 } else { 6665 if (dst.first() != src.first()) { 6666 sxtw(dst.first()->as_Register(), src.first()->as_Register()); 6667 } 6668 } 6669 } 6670 6671 // An oop arg. 
Must pass a handle not the oop itself 6672 void MacroAssembler::object_move( 6673 OopMap* map, 6674 int oop_handle_offset, 6675 int framesize_in_slots, 6676 VMRegPair src, 6677 VMRegPair dst, 6678 bool is_receiver, 6679 int* receiver_offset) { 6680 6681 // must pass a handle. First figure out the location we use as a handle 6682 6683 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register(); 6684 6685 // See if oop is null if it is we need no handle 6686 6687 if (src.first()->is_stack()) { 6688 6689 // Oop is already on the stack as an argument 6690 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 6691 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 6692 if (is_receiver) { 6693 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 6694 } 6695 6696 ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 6697 lea(rHandle, Address(rfp, reg2offset_in(src.first()))); 6698 // conditionally move a null 6699 cmp(rscratch1, zr); 6700 csel(rHandle, zr, rHandle, Assembler::EQ); 6701 } else { 6702 6703 // Oop is in an a register we must store it to the space we reserve 6704 // on the stack for oop_handles and pass a handle if oop is non-null 6705 6706 const Register rOop = src.first()->as_Register(); 6707 int oop_slot; 6708 if (rOop == j_rarg0) 6709 oop_slot = 0; 6710 else if (rOop == j_rarg1) 6711 oop_slot = 1; 6712 else if (rOop == j_rarg2) 6713 oop_slot = 2; 6714 else if (rOop == j_rarg3) 6715 oop_slot = 3; 6716 else if (rOop == j_rarg4) 6717 oop_slot = 4; 6718 else if (rOop == j_rarg5) 6719 oop_slot = 5; 6720 else if (rOop == j_rarg6) 6721 oop_slot = 6; 6722 else { 6723 assert(rOop == j_rarg7, "wrong register"); 6724 oop_slot = 7; 6725 } 6726 6727 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 6728 int offset = oop_slot*VMRegImpl::stack_slot_size; 6729 6730 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 6731 // Store oop in handle area, may be null 6732 str(rOop, Address(sp, offset)); 6733 if (is_receiver) { 6734 *receiver_offset = offset; 6735 } 6736 6737 cmp(rOop, zr); 6738 lea(rHandle, Address(sp, offset)); 6739 // conditionally move a null 6740 csel(rHandle, zr, rHandle, Assembler::EQ); 6741 } 6742 6743 // If arg is on the stack then place it otherwise it is already in correct reg. 
6744 if (dst.first()->is_stack()) { 6745 str(rHandle, Address(sp, reg2offset_out(dst.first()))); 6746 } 6747 } 6748 6749 // A float arg may have to do float reg int reg conversion 6750 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) { 6751 if (src.first()->is_stack()) { 6752 if (dst.first()->is_stack()) { 6753 ldrw(tmp, Address(rfp, reg2offset_in(src.first()))); 6754 strw(tmp, Address(sp, reg2offset_out(dst.first()))); 6755 } else { 6756 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6757 } 6758 } else if (src.first() != dst.first()) { 6759 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6760 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6761 else 6762 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6763 } 6764 } 6765 6766 // A long move 6767 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) { 6768 if (src.first()->is_stack()) { 6769 if (dst.first()->is_stack()) { 6770 // stack to stack 6771 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6772 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6773 } else { 6774 // stack to reg 6775 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6776 } 6777 } else if (dst.first()->is_stack()) { 6778 // reg to stack 6779 // Do we really have to sign extend??? 6780 // __ movslq(src.first()->as_Register(), src.first()->as_Register()); 6781 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6782 } else { 6783 if (dst.first() != src.first()) { 6784 mov(dst.first()->as_Register(), src.first()->as_Register()); 6785 } 6786 } 6787 } 6788 6789 6790 // A double move 6791 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) { 6792 if (src.first()->is_stack()) { 6793 if (dst.first()->is_stack()) { 6794 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6795 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6796 } else { 6797 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6798 } 6799 } else if (src.first() != dst.first()) { 6800 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6801 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6802 else 6803 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6804 } 6805 } 6806 6807 // Implements lightweight-locking. 6808 // 6809 // - obj: the object to be locked 6810 // - t1, t2, t3: temporary registers, will be destroyed 6811 // - slow: branched to if locking fails, absolute offset may larger than 32KB (imm14 encoding). 6812 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) { 6813 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); 6814 assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1); 6815 6816 Label push; 6817 const Register top = t1; 6818 const Register mark = t2; 6819 const Register t = t3; 6820 6821 // Preload the markWord. It is important that this is the first 6822 // instruction emitted as it is part of C1's null check semantics. 6823 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); 6824 6825 if (UseObjectMonitorTable) { 6826 // Clear cache in case fast locking succeeds. 6827 str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes())))); 6828 } 6829 6830 // Check if the lock-stack is full. 
// Implements lightweight-locking.
//
// - basic_lock: used to clear the cached ObjectMonitor slot when
//               UseObjectMonitorTable is enabled
// - obj: the object to be locked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds.
    str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
  }

  // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}
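// Sketch of the fast-lock CAS above (illustrative, not emitted verbatim).
// With the preloaded mark word in "mark", the lock bits are forced to the
// unlocked pattern to build the expected value and then cleared to build the
// locked value, so a single CAS both verifies the object is currently
// unlocked and acquires it:
//
//   expected = mark | markWord::unlocked_value;      // lock bits 0b01
//   locked   = expected ^ markWord::unlocked_value;  // lock bits 0b00
//   if (!CAS_acquire(&obj->mark, expected, locked)) goto slow;
//   lock_stack.push(obj);                            // str obj; bump top
//
// The recursion check earlier simply compares obj against the current top
// entry of the per-thread lock-stack and skips the CAS if they match.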
// Implements lightweight-unlocking.
//
// - obj: the object to be unlocked
// - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}
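// Hypothetical usage sketch for the two helpers above (for exposition only;
// the names lock_reg, obj_reg and tmp1..tmp3 are placeholders, and real
// callers such as the interpreter and compiled-code monitor paths add their
// own slow-path calls into the runtime):
//
//   Label slow_lock, locked;
//   lightweight_lock(lock_reg, obj_reg, tmp1, tmp2, tmp3, slow_lock);
//   b(locked);
//   bind(slow_lock);
//   // ... call into the runtime to enter the monitor ...
//   bind(locked);
//
// The only hard register requirements are the ones asserted by the two
// functions themselves: all arguments distinct, and none of them rscratch1.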