/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//        other  Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//        strictly should be 64 bit non-FP/SIMD i.e.
//        0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//        strictly should be 64 bit movz #imm16<<0
//        110___10100 (i.e. requires insn[31:21] == 11010010100)
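//
// Worked example (editor's illustration, not part of the original
// comment): for the encoding 0x94000123, i.e. "bl .+0x48c",
// insn[30:25] == 0b001010, which the switch in RelocActions::run()
// below dispatches to subgroup 1a (unconditional branch); the target
// is then insn_addr + (sextract(insn, 25, 0) << 2) == insn_addr + 0x48c.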
//
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};
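// Worked example for Patcher::adrp (editor's illustration, not from the
// original source): patching an adrp at pc 0x10000 to reach target 0x25034
// computes the 4k-page delta 0x25 - 0x10 = 0x15, then splits it into
// immlo = 0x15 & 3 = 1 (bits 30:29) and immhi = 0x15 >> 2 = 5 (bits 23:5);
// the low 12 bits of the target, 0x034, are left to the inner reloc.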

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}
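// Worked example for the narrow-oop case above (editor's illustration,
// not from the original source): for a narrow oop value 0x12345678 the
// patcher rewrites the pair to
//   movz Rd, #0x1234, lsl #16
//   movk Rd, #0x5678
// i.e. the upper half lands in the leading movz and the lower half in
// the trailing movk, together materializing the full 32-bit value.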

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & sp of the last Java frame have to be
// recorded in the (thread-local) JavaThread object. When leaving C land,
// the last Java fp has to be reset to 0. This is required to allow proper
// stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}
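// Editor's note (illustrative, not from the original comments): a plain
// b/bl encodes a signed 26-bit word offset, i.e. a reach of +/-128 MB,
// which is why far_call above and far_jump below fall back to the
// three-instruction adrp/add/blr (or br) sequence only when
// target_needs_far_branch() says the code cache layout can exceed that
// range.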

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (SCCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call target
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}
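// Editor's note (illustrative, not from the original comments): the two
// counts above match the sequence emitted by ic_check() below: two klass
// loads, a compare, and a br.EQ over the miss path is four instructions;
// a near miss-jump (b) adds one for a total of five, while a far miss
// path (adrp/add/br) adds three for a total of seven.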
int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off) {
      add(recv_klass, recv_klass, itentry_off);
    }
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}
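// Editor's note (illustrative, not from the original comments): on a
// 64-bit VM an itableOffsetEntry holds an interface pointer plus an
// offset, so scan_step above advances two words (16 bytes) per itable
// entry; the scan ends either on a match or on the null sentinel entry
// that terminates the itable.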

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
                              - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
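// Editor's note (illustrative, not from the original comments): both scans
// communicate their result through the flags. On exit EQ means `value` was
// found; if the scan ran off the end, the flags hold the last (failed)
// compare. When count is zero the loop body never runs and the flags are
// left untouched, which is why check_klass_subtype_slow_path below
// pre-sets NE with cmp(sp, zr) before calling repne_scan.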
1546 1547 assert_different_registers(sub_klass, super_klass, temp_reg); 1548 if (temp2_reg != noreg) 1549 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1550 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1551 1552 Label L_fallthrough; 1553 int label_nulls = 0; 1554 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1555 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1556 assert(label_nulls <= 1, "at most one null in the batch"); 1557 1558 // a couple of useful fields in sub_klass: 1559 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1560 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1561 Address secondary_supers_addr(sub_klass, ss_offset); 1562 Address super_cache_addr( sub_klass, sc_offset); 1563 1564 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1565 1566 // Do a linear scan of the secondary super-klass chain. 1567 // This code is rarely used, so simplicity is a virtue here. 1568 // The repne_scan instruction uses fixed registers, which we must spill. 1569 // Don't worry too much about pre-existing connections with the input regs. 1570 1571 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1572 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1573 1574 RegSet pushed_registers; 1575 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1576 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1577 1578 if (super_klass != r0) { 1579 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1580 } 1581 1582 push(pushed_registers, sp); 1583 1584 // Get super_klass value into r0 (even if it was in r5 or r2). 1585 if (super_klass != r0) { 1586 mov(r0, super_klass); 1587 } 1588 1589 #ifndef PRODUCT 1590 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1591 #endif //PRODUCT 1592 1593 // We will consult the secondary-super array. 1594 ldr(r5, secondary_supers_addr); 1595 // Load the array length. 1596 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1597 // Skip to start of data. 1598 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1599 1600 cmp(sp, zr); // Clear Z flag; SP is never zero 1601 // Scan R2 words at [R5] for an occurrence of R0. 1602 // Set NZ/Z based on last compare. 1603 repne_scan(r5, r0, r2, rscratch1); 1604 1605 // Unspill the temp. registers: 1606 pop(pushed_registers, sp); 1607 1608 br(Assembler::NE, *L_failure); 1609 1610 // Success. Cache the super we found and proceed in triumph. 1611 str(super_klass, super_cache_addr); 1612 1613 if (L_success != &L_fallthrough) { 1614 b(*L_success); 1615 } 1616 1617 #undef IS_A_TEMP 1618 1619 bind(L_fallthrough); 1620 } 1621 1622 // Ensure that the inline code and the stub are using the same registers. 
1623 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1624 do { \ 1625 assert(r_super_klass == r0 && \ 1626 r_array_base == r1 && \ 1627 r_array_length == r2 && \ 1628 (r_array_index == r3 || r_array_index == noreg) && \ 1629 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1630 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1631 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1632 } while(0) 1633 1634 // Return true: we succeeded in generating this code 1635 bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, 1636 Register r_super_klass, 1637 Register temp1, 1638 Register temp2, 1639 Register temp3, 1640 FloatRegister vtemp, 1641 Register result, 1642 u1 super_klass_slot, 1643 bool stub_is_near) { 1644 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1645 1646 Label L_fallthrough; 1647 1648 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1649 1650 const Register 1651 r_array_base = temp1, // r1 1652 r_array_length = temp2, // r2 1653 r_array_index = temp3, // r3 1654 r_bitmap = rscratch2; 1655 1656 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1657 1658 u1 bit = super_klass_slot; 1659 1660 // Make sure that result is nonzero if the TBZ below misses. 1661 mov(result, 1); 1662 1663 // We're going to need the bitmap in a vector reg and in a core reg, 1664 // so load both now. 1665 ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset())); 1666 if (bit != 0) { 1667 ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset())); 1668 } 1669 // First check the bitmap to see if super_klass might be present. If 1670 // the bit is zero, we are certain that super_klass is not one of 1671 // the secondary supers. 1672 tbz(r_bitmap, bit, L_fallthrough); 1673 1674 // Get the first array index that can contain super_klass into r_array_index. 1675 if (bit != 0) { 1676 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1677 cnt(vtemp, T8B, vtemp); 1678 addv(vtemp, T8B, vtemp); 1679 fmovd(r_array_index, vtemp); 1680 } else { 1681 mov(r_array_index, (u1)1); 1682 } 1683 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1684 1685 // We will consult the secondary-super array. 1686 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1687 1688 // The value i in r_array_index is >= 1, so even though r_array_base 1689 // points to the length, we don't need to adjust it to point to the 1690 // data. 1691 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1692 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1693 1694 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1695 eor(result, result, r_super_klass); 1696 cbz(result, L_fallthrough); // Found a match 1697 1698 // Is there another entry to check? Consult the bitmap. 1699 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1700 1701 // Linear probe. 1702 if (bit != 0) { 1703 ror(r_bitmap, r_bitmap, bit); 1704 } 1705 1706 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1707 // The next slot to be inspected, by the stub we're about to call, 1708 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1709 // have been checked. 
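// Worked example (illustrative): if super_klass hashed to slot 5, then
// bit == 5 and the ror above rotated the bitmap right by 5, so the slot
// just inspected corresponds to bit 0 and the next candidate slot to
// bit 1; the slow-path stub keeps rotating by one bit per probe as it
// walks the table.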
1710   Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1711   if (stub_is_near) {
1712     bl(stub);
1713   } else {
1714     address call = trampoline_call(stub);
1715     if (call == nullptr) {
1716       return false; // trampoline allocation failed
1717     }
1718   }
1719
1720   BLOCK_COMMENT("} lookup_secondary_supers_table");
1721
1722   bind(L_fallthrough);
1723
1724   if (VerifySecondarySupers) {
1725     verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1726                                   temp1, temp2, result);      // r1, r2, r5
1727   }
1728   return true;
1729 }
1730
1731 // Called by code generated by lookup_secondary_supers_table
1732 // above. This is called when there is a collision in the hashed
1733 // lookup in the secondary supers array.
1734 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1735                                                              Register r_array_base,
1736                                                              Register r_array_index,
1737                                                              Register r_bitmap,
1738                                                              Register temp1,
1739                                                              Register result) {
1740   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1741
1742   const Register
1743     r_array_length = temp1,
1744     r_sub_klass    = noreg; // unused
1745
1746   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1747
1748   Label L_fallthrough, L_huge;
1749
1750   // Load the array length.
1751   ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1752   // And adjust the array base to point to the data.
1753   // NB! Effectively increments current slot index by 1.
1754   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1755   add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1756
1757   // The bitmap is full to bursting.
1758   // Implicit invariant: BITMAP_FULL implies (length > 0)
1759   assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1760   cmn(r_bitmap, (u1)1);
1761   br(EQ, L_huge);
1762
1763   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1764   // current slot (at secondary_supers[r_array_index]) has not yet
1765   // been inspected, and r_array_index may be out of bounds if we
1766   // wrapped around the end of the array.
1767
1768   { // This is conventional linear probing, but instead of terminating
1769     // when a null entry is found in the table, we maintain a bitmap
1770     // in which a 0 indicates missing entries.
1771     // The check above guarantees there are 0s in the bitmap, so the loop
1772     // eventually terminates.
1773     Label L_loop;
1774     bind(L_loop);
1775
1776     // Check for wraparound.
1777     cmp(r_array_index, r_array_length);
1778     csel(r_array_index, zr, r_array_index, GE);
1779
1780     ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1781     eor(result, rscratch1, r_super_klass);
1782     cbz(result, L_fallthrough);
1783
1784     tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1785
1786     ror(r_bitmap, r_bitmap, 1);
1787     add(r_array_index, r_array_index, 1);
1788     b(L_loop);
1789   }
1790
1791   { // Degenerate case: more than 64 secondary supers.
1792     // FIXME: We could do something smarter here, maybe a vectorized
1793     // comparison or a binary search, but is that worth any added
1794     // complexity?
1795     bind(L_huge);
1796     cmp(sp, zr); // Clear Z flag; SP is never zero
1797     repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1798     cset(result, NE); // result == 0 iff we got a match.
1799   }
1800
1801   bind(L_fallthrough);
1802 }
1803
1804 // Make sure that the hashed lookup and a linear scan agree.
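// The code below is a consistency check, not part of the lookup itself:
// it redoes the query with a linear repne_scan over the whole
// secondary-supers array, normalizes both answers to 0/1, and reports a
// disagreement through Klass::on_secondary_supers_verification_failure.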
1805 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 1806 Register r_super_klass, 1807 Register temp1, 1808 Register temp2, 1809 Register result) { 1810 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 1811 1812 const Register 1813 r_array_base = temp1, 1814 r_array_length = temp2, 1815 r_array_index = noreg, // unused 1816 r_bitmap = noreg; // unused 1817 1818 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1819 1820 BLOCK_COMMENT("verify_secondary_supers_table {"); 1821 1822 // We will consult the secondary-super array. 1823 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1824 1825 // Load the array length. 1826 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1827 // And adjust the array base to point to the data. 1828 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1829 1830 cmp(sp, zr); // Clear Z flag; SP is never zero 1831 // Scan R2 words at [R5] for an occurrence of R0. 1832 // Set NZ/Z based on last compare. 1833 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 1834 // rscratch1 == 0 iff we got a match. 1835 cset(rscratch1, NE); 1836 1837 Label passed; 1838 cmp(result, zr); 1839 cset(result, NE); // normalize result to 0/1 for comparison 1840 1841 cmp(rscratch1, result); 1842 br(EQ, passed); 1843 { 1844 mov(r0, r_super_klass); // r0 <- r0 1845 mov(r1, r_sub_klass); // r1 <- r4 1846 mov(r2, /*expected*/rscratch1); // r2 <- r8 1847 mov(r3, result); // r3 <- r5 1848 mov(r4, (address)("mismatch")); // r4 <- const 1849 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 1850 should_not_reach_here(); 1851 } 1852 bind(passed); 1853 1854 BLOCK_COMMENT("} verify_secondary_supers_table"); 1855 } 1856 1857 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1858 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1859 assert_different_registers(klass, rthread, scratch); 1860 1861 Label L_fallthrough, L_tmp; 1862 if (L_fast_path == nullptr) { 1863 L_fast_path = &L_fallthrough; 1864 } else if (L_slow_path == nullptr) { 1865 L_slow_path = &L_fallthrough; 1866 } 1867 // Fast path check: class is fully initialized 1868 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset())); 1869 subs(zr, scratch, InstanceKlass::fully_initialized); 1870 br(Assembler::EQ, *L_fast_path); 1871 1872 // Fast path check: current thread is initializer thread 1873 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1874 cmp(rthread, scratch); 1875 1876 if (L_slow_path == &L_fallthrough) { 1877 br(Assembler::EQ, *L_fast_path); 1878 bind(*L_slow_path); 1879 } else if (L_fast_path == &L_fallthrough) { 1880 br(Assembler::NE, *L_slow_path); 1881 bind(*L_fast_path); 1882 } else { 1883 Unimplemented(); 1884 } 1885 } 1886 1887 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1888 if (!VerifyOops) return; 1889 1890 // Pass register number to verify_oop_subroutine 1891 const char* b = nullptr; 1892 { 1893 ResourceMark rm; 1894 stringStream ss; 1895 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1896 b = code_string(ss.as_string()); 1897 } 1898 BLOCK_COMMENT("verify_oop {"); 1899 1900 strip_return_address(); // This might happen within a stack frame. 
1901 protect_return_address(); 1902 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1903 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1904 1905 mov(r0, reg); 1906 movptr(rscratch1, (uintptr_t)(address)b); 1907 1908 // call indirectly to solve generation ordering problem 1909 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1910 ldr(rscratch2, Address(rscratch2)); 1911 blr(rscratch2); 1912 1913 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1914 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1915 authenticate_return_address(); 1916 1917 BLOCK_COMMENT("} verify_oop"); 1918 } 1919 1920 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1921 if (!VerifyOops) return; 1922 1923 const char* b = nullptr; 1924 { 1925 ResourceMark rm; 1926 stringStream ss; 1927 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1928 b = code_string(ss.as_string()); 1929 } 1930 BLOCK_COMMENT("verify_oop_addr {"); 1931 1932 strip_return_address(); // This might happen within a stack frame. 1933 protect_return_address(); 1934 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1935 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1936 1937 // addr may contain sp so we will have to adjust it based on the 1938 // pushes that we just did. 1939 if (addr.uses(sp)) { 1940 lea(r0, addr); 1941 ldr(r0, Address(r0, 4 * wordSize)); 1942 } else { 1943 ldr(r0, addr); 1944 } 1945 movptr(rscratch1, (uintptr_t)(address)b); 1946 1947 // call indirectly to solve generation ordering problem 1948 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1949 ldr(rscratch2, Address(rscratch2)); 1950 blr(rscratch2); 1951 1952 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1953 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1954 authenticate_return_address(); 1955 1956 BLOCK_COMMENT("} verify_oop_addr"); 1957 } 1958 1959 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1960 int extra_slot_offset) { 1961 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
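// For example (assuming Interpreter::stackElementSize == 8 and
// expr_offset_in_bytes(0) == 0): a constant arg_slot of 2 with
// extra_slot_offset == 0 resolves to Address(esp, 16), i.e. the third
// stack element above esp.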
1962 int stackElementSize = Interpreter::stackElementSize; 1963 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1964 #ifdef ASSERT 1965 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1966 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1967 #endif 1968 if (arg_slot.is_constant()) { 1969 return Address(esp, arg_slot.as_constant() * stackElementSize 1970 + offset); 1971 } else { 1972 add(rscratch1, esp, arg_slot.as_register(), 1973 ext::uxtx, exact_log2(stackElementSize)); 1974 return Address(rscratch1, offset); 1975 } 1976 } 1977 1978 void MacroAssembler::call_VM_leaf_base(address entry_point, 1979 int number_of_arguments, 1980 Label *retaddr) { 1981 Label E, L; 1982 1983 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1984 1985 mov(rscratch1, entry_point); 1986 blr(rscratch1); 1987 if (retaddr) 1988 bind(*retaddr); 1989 1990 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1991 } 1992 1993 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1994 call_VM_leaf_base(entry_point, number_of_arguments); 1995 } 1996 1997 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1998 pass_arg0(this, arg_0); 1999 call_VM_leaf_base(entry_point, 1); 2000 } 2001 2002 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2003 assert_different_registers(arg_1, c_rarg0); 2004 pass_arg0(this, arg_0); 2005 pass_arg1(this, arg_1); 2006 call_VM_leaf_base(entry_point, 2); 2007 } 2008 2009 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2010 Register arg_1, Register arg_2) { 2011 assert_different_registers(arg_1, c_rarg0); 2012 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2013 pass_arg0(this, arg_0); 2014 pass_arg1(this, arg_1); 2015 pass_arg2(this, arg_2); 2016 call_VM_leaf_base(entry_point, 3); 2017 } 2018 2019 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2020 pass_arg0(this, arg_0); 2021 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2022 } 2023 2024 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2025 2026 assert_different_registers(arg_0, c_rarg1); 2027 pass_arg1(this, arg_1); 2028 pass_arg0(this, arg_0); 2029 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2030 } 2031 2032 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2033 assert_different_registers(arg_0, c_rarg1, c_rarg2); 2034 assert_different_registers(arg_1, c_rarg2); 2035 pass_arg2(this, arg_2); 2036 pass_arg1(this, arg_1); 2037 pass_arg0(this, arg_0); 2038 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2039 } 2040 2041 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2042 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2043 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2044 assert_different_registers(arg_2, c_rarg3); 2045 pass_arg3(this, arg_3); 2046 pass_arg2(this, arg_2); 2047 pass_arg1(this, arg_1); 2048 pass_arg0(this, arg_0); 2049 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2050 } 2051 2052 void MacroAssembler::null_check(Register reg, int offset) { 2053 if (needs_explicit_null_check(offset)) { 2054 // provoke OS null exception if reg is null by 2055 // accessing M[reg] w/o changing any registers 2056 // NOTE: this is plenty to provoke a segv 2057 ldr(zr, Address(reg)); 2058 } else { 2059 // 
nothing to do, (later) access of M[reg + offset] 2060 // will provoke OS null exception if reg is null 2061 } 2062 } 2063 2064 // MacroAssembler protected routines needed to implement 2065 // public methods 2066 2067 void MacroAssembler::mov(Register r, Address dest) { 2068 code_section()->relocate(pc(), dest.rspec()); 2069 uint64_t imm64 = (uint64_t)dest.target(); 2070 movptr(r, imm64); 2071 } 2072 2073 // Move a constant pointer into r. In AArch64 mode the virtual 2074 // address space is 48 bits in size, so we only need three 2075 // instructions to create a patchable instruction sequence that can 2076 // reach anywhere. 2077 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2078 #ifndef PRODUCT 2079 { 2080 char buffer[64]; 2081 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2082 block_comment(buffer); 2083 } 2084 #endif 2085 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2086 movz(r, imm64 & 0xffff); 2087 imm64 >>= 16; 2088 movk(r, imm64 & 0xffff, 16); 2089 imm64 >>= 16; 2090 movk(r, imm64 & 0xffff, 32); 2091 } 2092 2093 // Macro to mov replicated immediate to vector register. 2094 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2095 // the upper 56/48/32 bits must be zeros for B/H/S type. 2096 // Vd will get the following values for different arrangements in T 2097 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2098 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2099 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2100 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2101 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2102 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2103 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2104 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2105 // Clobbers rscratch1 2106 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2107 assert(T != T1Q, "unsupported"); 2108 if (T == T1D || T == T2D) { 2109 int imm = operand_valid_for_movi_immediate(imm64, T); 2110 if (-1 != imm) { 2111 movi(Vd, T, imm); 2112 } else { 2113 mov(rscratch1, imm64); 2114 dup(Vd, T, rscratch1); 2115 } 2116 return; 2117 } 2118 2119 #ifdef ASSERT 2120 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2121 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2122 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2123 #endif 2124 int shift = operand_valid_for_movi_immediate(imm64, T); 2125 uint32_t imm32 = imm64 & 0xffffffffULL; 2126 if (shift >= 0) { 2127 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2128 } else { 2129 movw(rscratch1, imm32); 2130 dup(Vd, T, rscratch1); 2131 } 2132 } 2133 2134 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2135 { 2136 #ifndef PRODUCT 2137 { 2138 char buffer[64]; 2139 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2140 block_comment(buffer); 2141 } 2142 #endif 2143 if (operand_valid_for_logical_immediate(false, imm64)) { 2144 orr(dst, zr, imm64); 2145 } else { 2146 // we can use a combination of MOVZ or MOVN with 2147 // MOVK to build up the constant 2148 uint64_t imm_h[4]; 2149 int zero_count = 0; 2150 int neg_count = 0; 2151 int i; 2152 for (i = 0; i < 4; i++) { 2153 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2154 if (imm_h[i] == 0) { 2155 zero_count++; 2156 } else if (imm_h[i] == 0xffffL) { 2157 neg_count++; 
2158 } 2159 } 2160 if (zero_count == 4) { 2161 // one MOVZ will do 2162 movz(dst, 0); 2163 } else if (neg_count == 4) { 2164 // one MOVN will do 2165 movn(dst, 0); 2166 } else if (zero_count == 3) { 2167 for (i = 0; i < 4; i++) { 2168 if (imm_h[i] != 0L) { 2169 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2170 break; 2171 } 2172 } 2173 } else if (neg_count == 3) { 2174 // one MOVN will do 2175 for (int i = 0; i < 4; i++) { 2176 if (imm_h[i] != 0xffffL) { 2177 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2178 break; 2179 } 2180 } 2181 } else if (zero_count == 2) { 2182 // one MOVZ and one MOVK will do 2183 for (i = 0; i < 3; i++) { 2184 if (imm_h[i] != 0L) { 2185 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2186 i++; 2187 break; 2188 } 2189 } 2190 for (;i < 4; i++) { 2191 if (imm_h[i] != 0L) { 2192 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2193 } 2194 } 2195 } else if (neg_count == 2) { 2196 // one MOVN and one MOVK will do 2197 for (i = 0; i < 4; i++) { 2198 if (imm_h[i] != 0xffffL) { 2199 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2200 i++; 2201 break; 2202 } 2203 } 2204 for (;i < 4; i++) { 2205 if (imm_h[i] != 0xffffL) { 2206 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2207 } 2208 } 2209 } else if (zero_count == 1) { 2210 // one MOVZ and two MOVKs will do 2211 for (i = 0; i < 4; i++) { 2212 if (imm_h[i] != 0L) { 2213 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2214 i++; 2215 break; 2216 } 2217 } 2218 for (;i < 4; i++) { 2219 if (imm_h[i] != 0x0L) { 2220 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2221 } 2222 } 2223 } else if (neg_count == 1) { 2224 // one MOVN and two MOVKs will do 2225 for (i = 0; i < 4; i++) { 2226 if (imm_h[i] != 0xffffL) { 2227 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2228 i++; 2229 break; 2230 } 2231 } 2232 for (;i < 4; i++) { 2233 if (imm_h[i] != 0xffffL) { 2234 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2235 } 2236 } 2237 } else { 2238 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2239 movz(dst, (uint32_t)imm_h[0], 0); 2240 for (i = 1; i < 4; i++) { 2241 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2242 } 2243 } 2244 } 2245 } 2246 2247 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2248 { 2249 #ifndef PRODUCT 2250 { 2251 char buffer[64]; 2252 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2253 block_comment(buffer); 2254 } 2255 #endif 2256 if (operand_valid_for_logical_immediate(true, imm32)) { 2257 orrw(dst, zr, imm32); 2258 } else { 2259 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2260 // constant 2261 uint32_t imm_h[2]; 2262 imm_h[0] = imm32 & 0xffff; 2263 imm_h[1] = ((imm32 >> 16) & 0xffff); 2264 if (imm_h[0] == 0) { 2265 movzw(dst, imm_h[1], 16); 2266 } else if (imm_h[0] == 0xffff) { 2267 movnw(dst, imm_h[1] ^ 0xffff, 16); 2268 } else if (imm_h[1] == 0) { 2269 movzw(dst, imm_h[0], 0); 2270 } else if (imm_h[1] == 0xffff) { 2271 movnw(dst, imm_h[0] ^ 0xffff, 0); 2272 } else { 2273 // use a MOVZ and MOVK (makes it easier to debug) 2274 movzw(dst, imm_h[0], 0); 2275 movkw(dst, imm_h[1], 16); 2276 } 2277 } 2278 } 2279 2280 // Form an address from base + offset in Rd. Rd may or may 2281 // not actually be used: you must use the Address that is returned. 2282 // It is up to you to ensure that the shift provided matches the size 2283 // of your data. 
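// A worked example (illustrative): form_address(rscratch1, base, 0x100008, 3)
// does not fit any single scaled immediate, but the offset splits as
// 0x100000 + 8, so the code below emits "add rscratch1, base, #0x100000"
// and returns Address(rscratch1, 8).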
2284 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2285 if (Address::offset_ok_for_immed(byte_offset, shift)) 2286 // It fits; no need for any heroics 2287 return Address(base, byte_offset); 2288 2289 // Don't do anything clever with negative or misaligned offsets 2290 unsigned mask = (1 << shift) - 1; 2291 if (byte_offset < 0 || byte_offset & mask) { 2292 mov(Rd, byte_offset); 2293 add(Rd, base, Rd); 2294 return Address(Rd); 2295 } 2296 2297 // See if we can do this with two 12-bit offsets 2298 { 2299 uint64_t word_offset = byte_offset >> shift; 2300 uint64_t masked_offset = word_offset & 0xfff000; 2301 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2302 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2303 add(Rd, base, masked_offset << shift); 2304 word_offset -= masked_offset; 2305 return Address(Rd, word_offset << shift); 2306 } 2307 } 2308 2309 // Do it the hard way 2310 mov(Rd, byte_offset); 2311 add(Rd, base, Rd); 2312 return Address(Rd); 2313 } 2314 2315 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2316 bool want_remainder, Register scratch) 2317 { 2318 // Full implementation of Java idiv and irem. The function 2319 // returns the (pc) offset of the div instruction - may be needed 2320 // for implicit exceptions. 2321 // 2322 // constraint : ra/rb =/= scratch 2323 // normal case 2324 // 2325 // input : ra: dividend 2326 // rb: divisor 2327 // 2328 // result: either 2329 // quotient (= ra idiv rb) 2330 // remainder (= ra irem rb) 2331 2332 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2333 2334 int idivl_offset = offset(); 2335 if (! want_remainder) { 2336 sdivw(result, ra, rb); 2337 } else { 2338 sdivw(scratch, ra, rb); 2339 Assembler::msubw(result, scratch, rb, ra); 2340 } 2341 2342 return idivl_offset; 2343 } 2344 2345 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2346 bool want_remainder, Register scratch) 2347 { 2348 // Full implementation of Java ldiv and lrem. The function 2349 // returns the (pc) offset of the div instruction - may be needed 2350 // for implicit exceptions. 2351 // 2352 // constraint : ra/rb =/= scratch 2353 // normal case 2354 // 2355 // input : ra: dividend 2356 // rb: divisor 2357 // 2358 // result: either 2359 // quotient (= ra idiv rb) 2360 // remainder (= ra irem rb) 2361 2362 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2363 2364 int idivq_offset = offset(); 2365 if (! want_remainder) { 2366 sdiv(result, ra, rb); 2367 } else { 2368 sdiv(scratch, ra, rb); 2369 Assembler::msub(result, scratch, rb, ra); 2370 } 2371 2372 return idivq_offset; 2373 } 2374 2375 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2376 address prev = pc() - NativeMembar::instruction_size; 2377 address last = code()->last_insn(); 2378 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2379 NativeMembar *bar = NativeMembar_at(prev); 2380 if (AlwaysMergeDMB) { 2381 bar->set_kind(bar->get_kind() | order_constraint); 2382 BLOCK_COMMENT("merged membar(always)"); 2383 return; 2384 } 2385 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because 2386 // doing so would introduce a StoreLoad which the caller did not 2387 // intend 2388 if (bar->get_kind() == order_constraint 2389 || bar->get_kind() == AnyAny 2390 || order_constraint == AnyAny) { 2391 // We are merging two memory barrier instructions. 
2392     // On AArch64 we can do this simply by ORing them together.
2393       bar->set_kind(bar->get_kind() | order_constraint);
2394       BLOCK_COMMENT("merged membar");
2395       return;
2396     } else {
2397       // A special case: in a sequence like "DMB ST; DMB LD; DMB ST" the
2398       // last DMB can be skipped, but we need to check the last two instructions.
2399       address prev2 = prev - NativeMembar::instruction_size;
2400       if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2401         NativeMembar *bar2 = NativeMembar_at(prev2);
2402         assert(bar2->get_kind() == order_constraint, "it should be merged before");
2403         BLOCK_COMMENT("merged membar(elided)");
2404         return;
2405       }
2406     }
2407   }
2408   code()->set_last_insn(pc());
2409   dmb(Assembler::barrier(order_constraint));
2410 }
2411
2412 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2413   if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2414     merge_ldst(rt, adr, size_in_bytes, is_store);
2415     code()->clear_last_insn();
2416     return true;
2417   } else {
2418     assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
2419     const uint64_t mask = size_in_bytes - 1;
2420     if (adr.getMode() == Address::base_plus_offset &&
2421         (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2422       code()->set_last_insn(pc());
2423     }
2424     return false;
2425   }
2426 }
2427
2428 void MacroAssembler::ldr(Register Rx, const Address &adr) {
2429   // We always try to merge two adjacent loads into one ldp.
2430   if (!try_merge_ldst(Rx, adr, 8, false)) {
2431     Assembler::ldr(Rx, adr);
2432   }
2433 }
2434
2435 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2436   // We always try to merge two adjacent loads into one ldp.
2437   if (!try_merge_ldst(Rw, adr, 4, false)) {
2438     Assembler::ldrw(Rw, adr);
2439   }
2440 }
2441
2442 void MacroAssembler::str(Register Rx, const Address &adr) {
2443   // We always try to merge two adjacent stores into one stp.
2444   if (!try_merge_ldst(Rx, adr, 8, true)) {
2445     Assembler::str(Rx, adr);
2446   }
2447 }
2448
2449 void MacroAssembler::strw(Register Rw, const Address &adr) {
2450   // We always try to merge two adjacent stores into one stp.
2451   if (!try_merge_ldst(Rw, adr, 4, true)) {
2452     Assembler::strw(Rw, adr);
2453   }
2454 }
2455
2456 // MacroAssembler routines actually found to be needed
2457
2458 void MacroAssembler::push(Register src)
2459 {
2460   str(src, Address(pre(esp, -1 * wordSize)));
2461 }
2462
2463 void MacroAssembler::pop(Register dst)
2464 {
2465   ldr(dst, Address(post(esp, 1 * wordSize)));
2466 }
2467
2468 // Note: load_unsigned_short used to be called load_unsigned_word.
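// The loaders below differ only in the load instruction emitted: the
// unsigned forms (ldrh/ldrb) zero-extend into dst, the plain signed
// forms (ldrsh/ldrsb) sign-extend to the full 64 bits, and the "32"
// variants (ldrshw/ldrsbw) sign-extend only into the low 32 bits.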
2469 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2470 int off = offset(); 2471 ldrh(dst, src); 2472 return off; 2473 } 2474 2475 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2476 int off = offset(); 2477 ldrb(dst, src); 2478 return off; 2479 } 2480 2481 int MacroAssembler::load_signed_short(Register dst, Address src) { 2482 int off = offset(); 2483 ldrsh(dst, src); 2484 return off; 2485 } 2486 2487 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2488 int off = offset(); 2489 ldrsb(dst, src); 2490 return off; 2491 } 2492 2493 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2494 int off = offset(); 2495 ldrshw(dst, src); 2496 return off; 2497 } 2498 2499 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2500 int off = offset(); 2501 ldrsbw(dst, src); 2502 return off; 2503 } 2504 2505 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2506 switch (size_in_bytes) { 2507 case 8: ldr(dst, src); break; 2508 case 4: ldrw(dst, src); break; 2509 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2510 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2511 default: ShouldNotReachHere(); 2512 } 2513 } 2514 2515 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2516 switch (size_in_bytes) { 2517 case 8: str(src, dst); break; 2518 case 4: strw(src, dst); break; 2519 case 2: strh(src, dst); break; 2520 case 1: strb(src, dst); break; 2521 default: ShouldNotReachHere(); 2522 } 2523 } 2524 2525 void MacroAssembler::decrementw(Register reg, int value) 2526 { 2527 if (value < 0) { incrementw(reg, -value); return; } 2528 if (value == 0) { return; } 2529 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2530 /* else */ { 2531 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2532 movw(rscratch2, (unsigned)value); 2533 subw(reg, reg, rscratch2); 2534 } 2535 } 2536 2537 void MacroAssembler::decrement(Register reg, int value) 2538 { 2539 if (value < 0) { increment(reg, -value); return; } 2540 if (value == 0) { return; } 2541 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2542 /* else */ { 2543 assert(reg != rscratch2, "invalid dst for register decrement"); 2544 mov(rscratch2, (uint64_t)value); 2545 sub(reg, reg, rscratch2); 2546 } 2547 } 2548 2549 void MacroAssembler::decrementw(Address dst, int value) 2550 { 2551 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2552 if (dst.getMode() == Address::literal) { 2553 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2554 lea(rscratch2, dst); 2555 dst = Address(rscratch2); 2556 } 2557 ldrw(rscratch1, dst); 2558 decrementw(rscratch1, value); 2559 strw(rscratch1, dst); 2560 } 2561 2562 void MacroAssembler::decrement(Address dst, int value) 2563 { 2564 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2565 if (dst.getMode() == Address::literal) { 2566 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2567 lea(rscratch2, dst); 2568 dst = Address(rscratch2); 2569 } 2570 ldr(rscratch1, dst); 2571 decrement(rscratch1, value); 2572 str(rscratch1, dst); 2573 } 2574 2575 void MacroAssembler::incrementw(Register reg, int value) 2576 { 2577 if (value < 0) { decrementw(reg, -value); return; } 2578 if (value == 0) { return; } 2579 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2580 /* 
else */ { 2581 assert(reg != rscratch2, "invalid dst for register increment"); 2582 movw(rscratch2, (unsigned)value); 2583 addw(reg, reg, rscratch2); 2584 } 2585 } 2586 2587 void MacroAssembler::increment(Register reg, int value) 2588 { 2589 if (value < 0) { decrement(reg, -value); return; } 2590 if (value == 0) { return; } 2591 if (value < (1 << 12)) { add(reg, reg, value); return; } 2592 /* else */ { 2593 assert(reg != rscratch2, "invalid dst for register increment"); 2594 movw(rscratch2, (unsigned)value); 2595 add(reg, reg, rscratch2); 2596 } 2597 } 2598 2599 void MacroAssembler::incrementw(Address dst, int value) 2600 { 2601 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2602 if (dst.getMode() == Address::literal) { 2603 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2604 lea(rscratch2, dst); 2605 dst = Address(rscratch2); 2606 } 2607 ldrw(rscratch1, dst); 2608 incrementw(rscratch1, value); 2609 strw(rscratch1, dst); 2610 } 2611 2612 void MacroAssembler::increment(Address dst, int value) 2613 { 2614 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2615 if (dst.getMode() == Address::literal) { 2616 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2617 lea(rscratch2, dst); 2618 dst = Address(rscratch2); 2619 } 2620 ldr(rscratch1, dst); 2621 increment(rscratch1, value); 2622 str(rscratch1, dst); 2623 } 2624 2625 // Push lots of registers in the bit set supplied. Don't push sp. 2626 // Return the number of words pushed 2627 int MacroAssembler::push(unsigned int bitset, Register stack) { 2628 int words_pushed = 0; 2629 2630 // Scan bitset to accumulate register pairs 2631 unsigned char regs[32]; 2632 int count = 0; 2633 for (int reg = 0; reg <= 30; reg++) { 2634 if (1 & bitset) 2635 regs[count++] = reg; 2636 bitset >>= 1; 2637 } 2638 regs[count++] = zr->raw_encoding(); 2639 count &= ~1; // Only push an even number of regs 2640 2641 if (count) { 2642 stp(as_Register(regs[0]), as_Register(regs[1]), 2643 Address(pre(stack, -count * wordSize))); 2644 words_pushed += 2; 2645 } 2646 for (int i = 2; i < count; i += 2) { 2647 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2648 Address(stack, i * wordSize)); 2649 words_pushed += 2; 2650 } 2651 2652 assert(words_pushed == count, "oops, pushed != count"); 2653 2654 return count; 2655 } 2656 2657 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2658 int words_pushed = 0; 2659 2660 // Scan bitset to accumulate register pairs 2661 unsigned char regs[32]; 2662 int count = 0; 2663 for (int reg = 0; reg <= 30; reg++) { 2664 if (1 & bitset) 2665 regs[count++] = reg; 2666 bitset >>= 1; 2667 } 2668 regs[count++] = zr->raw_encoding(); 2669 count &= ~1; 2670 2671 for (int i = 2; i < count; i += 2) { 2672 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2673 Address(stack, i * wordSize)); 2674 words_pushed += 2; 2675 } 2676 if (count) { 2677 ldp(as_Register(regs[0]), as_Register(regs[1]), 2678 Address(post(stack, count * wordSize))); 2679 words_pushed += 2; 2680 } 2681 2682 assert(words_pushed == count, "oops, pushed != count"); 2683 2684 return count; 2685 } 2686 2687 // Push lots of registers in the bit set supplied. Don't push sp. 
2688 // Return the number of dwords pushed 2689 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2690 int words_pushed = 0; 2691 bool use_sve = false; 2692 int sve_vector_size_in_bytes = 0; 2693 2694 #ifdef COMPILER2 2695 use_sve = Matcher::supports_scalable_vector(); 2696 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2697 #endif 2698 2699 // Scan bitset to accumulate register pairs 2700 unsigned char regs[32]; 2701 int count = 0; 2702 for (int reg = 0; reg <= 31; reg++) { 2703 if (1 & bitset) 2704 regs[count++] = reg; 2705 bitset >>= 1; 2706 } 2707 2708 if (count == 0) { 2709 return 0; 2710 } 2711 2712 if (mode == PushPopFull) { 2713 if (use_sve && sve_vector_size_in_bytes > 16) { 2714 mode = PushPopSVE; 2715 } else { 2716 mode = PushPopNeon; 2717 } 2718 } 2719 2720 #ifndef PRODUCT 2721 { 2722 char buffer[48]; 2723 if (mode == PushPopSVE) { 2724 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2725 } else if (mode == PushPopNeon) { 2726 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2727 } else { 2728 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2729 } 2730 block_comment(buffer); 2731 } 2732 #endif 2733 2734 if (mode == PushPopSVE) { 2735 sub(stack, stack, sve_vector_size_in_bytes * count); 2736 for (int i = 0; i < count; i++) { 2737 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2738 } 2739 return count * sve_vector_size_in_bytes / 8; 2740 } 2741 2742 if (mode == PushPopNeon) { 2743 if (count == 1) { 2744 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2745 return 2; 2746 } 2747 2748 bool odd = (count & 1) == 1; 2749 int push_slots = count + (odd ? 1 : 0); 2750 2751 // Always pushing full 128 bit registers. 2752 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2753 words_pushed += 2; 2754 2755 for (int i = 2; i + 1 < count; i += 2) { 2756 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2757 words_pushed += 2; 2758 } 2759 2760 if (odd) { 2761 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2762 words_pushed++; 2763 } 2764 2765 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2766 return count * 2; 2767 } 2768 2769 if (mode == PushPopFp) { 2770 bool odd = (count & 1) == 1; 2771 int push_slots = count + (odd ? 
1 : 0); 2772 2773 if (count == 1) { 2774 // Stack pointer must be 16 bytes aligned 2775 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2776 return 1; 2777 } 2778 2779 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2780 words_pushed += 2; 2781 2782 for (int i = 2; i + 1 < count; i += 2) { 2783 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2784 words_pushed += 2; 2785 } 2786 2787 if (odd) { 2788 // Stack pointer must be 16 bytes aligned 2789 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2790 words_pushed++; 2791 } 2792 2793 assert(words_pushed == count, "oops, pushed != count"); 2794 2795 return count; 2796 } 2797 2798 return 0; 2799 } 2800 2801 // Return the number of dwords popped 2802 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2803 int words_pushed = 0; 2804 bool use_sve = false; 2805 int sve_vector_size_in_bytes = 0; 2806 2807 #ifdef COMPILER2 2808 use_sve = Matcher::supports_scalable_vector(); 2809 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2810 #endif 2811 // Scan bitset to accumulate register pairs 2812 unsigned char regs[32]; 2813 int count = 0; 2814 for (int reg = 0; reg <= 31; reg++) { 2815 if (1 & bitset) 2816 regs[count++] = reg; 2817 bitset >>= 1; 2818 } 2819 2820 if (count == 0) { 2821 return 0; 2822 } 2823 2824 if (mode == PushPopFull) { 2825 if (use_sve && sve_vector_size_in_bytes > 16) { 2826 mode = PushPopSVE; 2827 } else { 2828 mode = PushPopNeon; 2829 } 2830 } 2831 2832 #ifndef PRODUCT 2833 { 2834 char buffer[48]; 2835 if (mode == PushPopSVE) { 2836 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 2837 } else if (mode == PushPopNeon) { 2838 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 2839 } else { 2840 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 2841 } 2842 block_comment(buffer); 2843 } 2844 #endif 2845 2846 if (mode == PushPopSVE) { 2847 for (int i = count - 1; i >= 0; i--) { 2848 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2849 } 2850 add(stack, stack, sve_vector_size_in_bytes * count); 2851 return count * sve_vector_size_in_bytes / 8; 2852 } 2853 2854 if (mode == PushPopNeon) { 2855 if (count == 1) { 2856 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2857 return 2; 2858 } 2859 2860 bool odd = (count & 1) == 1; 2861 int push_slots = count + (odd ? 1 : 0); 2862 2863 if (odd) { 2864 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2865 words_pushed++; 2866 } 2867 2868 for (int i = 2; i + 1 < count; i += 2) { 2869 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2870 words_pushed += 2; 2871 } 2872 2873 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2874 words_pushed += 2; 2875 2876 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2877 2878 return count * 2; 2879 } 2880 2881 if (mode == PushPopFp) { 2882 bool odd = (count & 1) == 1; 2883 int push_slots = count + (odd ? 
1 : 0); 2884 2885 if (count == 1) { 2886 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 2887 return 1; 2888 } 2889 2890 if (odd) { 2891 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2892 words_pushed++; 2893 } 2894 2895 for (int i = 2; i + 1 < count; i += 2) { 2896 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2897 words_pushed += 2; 2898 } 2899 2900 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 2901 words_pushed += 2; 2902 2903 assert(words_pushed == count, "oops, pushed != count"); 2904 2905 return count; 2906 } 2907 2908 return 0; 2909 } 2910 2911 // Return the number of dwords pushed 2912 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2913 bool use_sve = false; 2914 int sve_predicate_size_in_slots = 0; 2915 2916 #ifdef COMPILER2 2917 use_sve = Matcher::supports_scalable_vector(); 2918 if (use_sve) { 2919 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2920 } 2921 #endif 2922 2923 if (!use_sve) { 2924 return 0; 2925 } 2926 2927 unsigned char regs[PRegister::number_of_registers]; 2928 int count = 0; 2929 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2930 if (1 & bitset) 2931 regs[count++] = reg; 2932 bitset >>= 1; 2933 } 2934 2935 if (count == 0) { 2936 return 0; 2937 } 2938 2939 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2940 VMRegImpl::stack_slot_size * count, 16); 2941 sub(stack, stack, total_push_bytes); 2942 for (int i = 0; i < count; i++) { 2943 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2944 } 2945 return total_push_bytes / 8; 2946 } 2947 2948 // Return the number of dwords popped 2949 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2950 bool use_sve = false; 2951 int sve_predicate_size_in_slots = 0; 2952 2953 #ifdef COMPILER2 2954 use_sve = Matcher::supports_scalable_vector(); 2955 if (use_sve) { 2956 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2957 } 2958 #endif 2959 2960 if (!use_sve) { 2961 return 0; 2962 } 2963 2964 unsigned char regs[PRegister::number_of_registers]; 2965 int count = 0; 2966 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2967 if (1 & bitset) 2968 regs[count++] = reg; 2969 bitset >>= 1; 2970 } 2971 2972 if (count == 0) { 2973 return 0; 2974 } 2975 2976 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2977 VMRegImpl::stack_slot_size * count, 16); 2978 for (int i = count - 1; i >= 0; i--) { 2979 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2980 } 2981 add(stack, stack, total_pop_bytes); 2982 return total_pop_bytes / 8; 2983 } 2984 2985 #ifdef ASSERT 2986 void MacroAssembler::verify_heapbase(const char* msg) { 2987 #if 0 2988 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2989 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2990 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2991 // rheapbase is allocated as general register 2992 return; 2993 } 2994 if (CheckCompressedOops) { 2995 Label ok; 2996 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2997 cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2998 br(Assembler::EQ, ok); 2999 stop(msg); 3000 bind(ok); 3001 pop(1 << rscratch1->encoding(), sp); 3002 } 3003 #endif 3004 } 3005 #endif 3006 3007 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register 
tmp2) { 3008 assert_different_registers(value, tmp1, tmp2); 3009 Label done, tagged, weak_tagged; 3010 3011 cbz(value, done); // Use null as-is. 3012 tst(value, JNIHandles::tag_mask); // Test for tag. 3013 br(Assembler::NE, tagged); 3014 3015 // Resolve local handle 3016 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3017 verify_oop(value); 3018 b(done); 3019 3020 bind(tagged); 3021 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3022 tbnz(value, 0, weak_tagged); // Test for weak tag. 3023 3024 // Resolve global handle 3025 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3026 verify_oop(value); 3027 b(done); 3028 3029 bind(weak_tagged); 3030 // Resolve jweak. 3031 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3032 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3033 verify_oop(value); 3034 3035 bind(done); 3036 } 3037 3038 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3039 assert_different_registers(value, tmp1, tmp2); 3040 Label done; 3041 3042 cbz(value, done); // Use null as-is. 3043 3044 #ifdef ASSERT 3045 { 3046 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3047 Label valid_global_tag; 3048 tbnz(value, 1, valid_global_tag); // Test for global tag 3049 stop("non global jobject using resolve_global_jobject"); 3050 bind(valid_global_tag); 3051 } 3052 #endif 3053 3054 // Resolve global handle 3055 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3056 verify_oop(value); 3057 3058 bind(done); 3059 } 3060 3061 void MacroAssembler::stop(const char* msg) { 3062 BLOCK_COMMENT(msg); 3063 // load msg into r0 so we can access it from the signal handler 3064 // ExternalAddress enables saving and restoring via the code cache 3065 lea(c_rarg0, ExternalAddress((address) msg)); 3066 dcps1(0xdeae); 3067 SCCache::add_C_string(msg); 3068 } 3069 3070 void MacroAssembler::unimplemented(const char* what) { 3071 const char* buf = nullptr; 3072 { 3073 ResourceMark rm; 3074 stringStream ss; 3075 ss.print("unimplemented: %s", what); 3076 buf = code_string(ss.as_string()); 3077 } 3078 stop(buf); 3079 } 3080 3081 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3082 #ifdef ASSERT 3083 Label OK; 3084 br(cc, OK); 3085 stop(msg); 3086 bind(OK); 3087 #endif 3088 } 3089 3090 // If a constant does not fit in an immediate field, generate some 3091 // number of MOV instructions and then perform the operation. 3092 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3093 add_sub_imm_insn insn1, 3094 add_sub_reg_insn insn2, 3095 bool is32) { 3096 assert(Rd != zr, "Rd = zr and not setting flags?"); 3097 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3098 if (fits) { 3099 (this->*insn1)(Rd, Rn, imm); 3100 } else { 3101 if (uabs(imm) < (1 << 24)) { 3102 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3103 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3104 } else { 3105 assert_different_registers(Rd, Rn); 3106 mov(Rd, imm); 3107 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3108 } 3109 } 3110 } 3111 3112 // Separate vsn which sets the flags. Optimisations are more restricted 3113 // because we must set the flags correctly. 
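// In particular, the two-instruction split used above for moderately
// large immediates (adding the high and low 12-bit parts separately)
// is not available here: the flags would reflect only the second
// addition, not the combined sum.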
3114 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3115 add_sub_imm_insn insn1, 3116 add_sub_reg_insn insn2, 3117 bool is32) { 3118 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3119 if (fits) { 3120 (this->*insn1)(Rd, Rn, imm); 3121 } else { 3122 assert_different_registers(Rd, Rn); 3123 assert(Rd != zr, "overflow in immediate operand"); 3124 mov(Rd, imm); 3125 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3126 } 3127 } 3128 3129 3130 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3131 if (increment.is_register()) { 3132 add(Rd, Rn, increment.as_register()); 3133 } else { 3134 add(Rd, Rn, increment.as_constant()); 3135 } 3136 } 3137 3138 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3139 if (increment.is_register()) { 3140 addw(Rd, Rn, increment.as_register()); 3141 } else { 3142 addw(Rd, Rn, increment.as_constant()); 3143 } 3144 } 3145 3146 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3147 if (decrement.is_register()) { 3148 sub(Rd, Rn, decrement.as_register()); 3149 } else { 3150 sub(Rd, Rn, decrement.as_constant()); 3151 } 3152 } 3153 3154 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3155 if (decrement.is_register()) { 3156 subw(Rd, Rn, decrement.as_register()); 3157 } else { 3158 subw(Rd, Rn, decrement.as_constant()); 3159 } 3160 } 3161 3162 void MacroAssembler::reinit_heapbase() 3163 { 3164 if (UseCompressedOops) { 3165 if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) { 3166 mov(rheapbase, CompressedOops::ptrs_base()); 3167 } else { 3168 lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 3169 ldr(rheapbase, Address(rheapbase)); 3170 } 3171 } 3172 } 3173 3174 // this simulates the behaviour of the x86 cmpxchg instruction using a 3175 // load linked/store conditional pair. we use the acquire/release 3176 // versions of these instructions so that we flush pending writes as 3177 // per Java semantics. 3178 3179 // n.b the x86 version assumes the old value to be compared against is 3180 // in rax and updates rax with the value located in memory if the 3181 // cmpxchg fails. we supply a register for the old value explicitly 3182 3183 // the aarch64 load linked/store conditional instructions do not 3184 // accept an offset. so, unlike x86, we must provide a plain register 3185 // to identify the memory word to be compared/exchanged rather than a 3186 // register+offset Address. 3187 3188 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3189 Label &succeed, Label *fail) { 3190 // oldv holds comparison value 3191 // newv holds value to write in exchange 3192 // addr identifies memory word to compare against/update 3193 if (UseLSE) { 3194 mov(tmp, oldv); 3195 casal(Assembler::xword, oldv, newv, addr); 3196 cmp(tmp, oldv); 3197 br(Assembler::EQ, succeed); 3198 membar(AnyAny); 3199 } else { 3200 Label retry_load, nope; 3201 prfm(Address(addr), PSTL1STRM); 3202 bind(retry_load); 3203 // flush and load exclusive from the memory location 3204 // and fail if it is not what we expect 3205 ldaxr(tmp, addr); 3206 cmp(tmp, oldv); 3207 br(Assembler::NE, nope); 3208 // if we store+flush with no intervening write tmp will be zero 3209 stlxr(tmp, newv, addr); 3210 cbzw(tmp, succeed); 3211 // retry so we only ever return after a load fails to compare 3212 // ensures we don't return a stale value after a failed write. 
3213     b(retry_load);
3214     // if the memory word differs we return it in oldv and signal a fail
3215     bind(nope);
3216     membar(AnyAny);
3217     mov(oldv, tmp);
3218   }
3219   if (fail)
3220     b(*fail);
3221 }
3222
3223 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
3224                                         Label &succeed, Label *fail) {
3225   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
3226   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
3227 }
3228
3229 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
3230                               Label &succeed, Label *fail) {
3231   // oldv holds comparison value
3232   // newv holds value to write in exchange
3233   // addr identifies memory word to compare against/update
3234   // tmp returns 0/1 for success/failure
3235   if (UseLSE) {
3236     mov(tmp, oldv);
3237     casal(Assembler::word, oldv, newv, addr);
3238     cmp(tmp, oldv);
3239     br(Assembler::EQ, succeed);
3240     membar(AnyAny);
3241   } else {
3242     Label retry_load, nope;
3243     prfm(Address(addr), PSTL1STRM);
3244     bind(retry_load);
3245     // flush and load exclusive from the memory location
3246     // and fail if it is not what we expect
3247     ldaxrw(tmp, addr);
3248     cmp(tmp, oldv);
3249     br(Assembler::NE, nope);
3250     // if we store+flush with no intervening write tmp will be zero
3251     stlxrw(tmp, newv, addr);
3252     cbzw(tmp, succeed);
3253     // retry so we only ever return after a load fails to compare
3254     // ensures we don't return a stale value after a failed write.
3255     b(retry_load);
3256     // if the memory word differs we return it in oldv and signal a fail
3257     bind(nope);
3258     membar(AnyAny);
3259     mov(oldv, tmp);
3260   }
3261   if (fail)
3262     b(*fail);
3263 }
3264
3265 // A generic CAS; success or failure is in the EQ flag. A weak CAS
3266 // doesn't retry and may fail spuriously. If the oldval is wanted,
3267 // pass a register for the result, otherwise pass noreg.
3268
3269 // Clobbers rscratch1
3270 void MacroAssembler::cmpxchg(Register addr, Register expected,
3271                              Register new_val,
3272                              enum operand_size size,
3273                              bool acquire, bool release,
3274                              bool weak,
3275                              Register result) {
3276   if (result == noreg)  result = rscratch1;
3277   BLOCK_COMMENT("cmpxchg {");
3278   if (UseLSE) {
3279     mov(result, expected);
3280     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3281     compare_eq(result, expected, size);
3282 #ifdef ASSERT
3283     // Poison rscratch1 which is written on !UseLSE branch
3284     mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3285 #endif
3286   } else {
3287     Label retry_load, done;
3288     prfm(Address(addr), PSTL1STRM);
3289     bind(retry_load);
3290     load_exclusive(result, addr, size, acquire);
3291     compare_eq(result, expected, size);
3292     br(Assembler::NE, done);
3293     store_exclusive(rscratch1, new_val, addr, size, release);
3294     if (weak) {
3295       cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3296     } else {
3297       cbnzw(rscratch1, retry_load);
3298     }
3299     bind(done);
3300   }
3301   BLOCK_COMMENT("} cmpxchg");
3302 }
3303
3304 // A generic comparison. Only compares for equality, clobbers rscratch1.
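// For example (illustrative): compare_eq(r0, r1, byte) cannot simply use
// cmpw, which would compare all 32 bits; instead it emits
// "eorw rscratch1, r0, r1; ands zr, rscratch1, 0xff", setting Z exactly
// when the low bytes are equal.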
void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
  if (size == xword) {
    cmp(rm, rn);
  } else if (size == word) {
    cmpw(rm, rn);
  } else if (size == halfword) {
    eorw(rscratch1, rm, rn);
    ands(zr, rscratch1, 0xffff);
  } else if (size == byte) {
    eorw(rscratch1, rm, rn);
    ands(zr, rscratch1, 0xff);
  } else {
    ShouldNotReachHere();
  }
}


static bool different(Register a, RegisterOrConstant b, Register c) {
  if (b.is_constant())
    return a != c;
  else
    return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    if (incr.is_register()) {                                           \
      AOP(sz, incr.as_register(), prev, addr);                          \
    } else {                                                            \
      mov(rscratch2, incr.as_constant());                               \
      AOP(sz, rscratch2, prev, addr);                                   \
    }                                                                   \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  prfm(Address(addr), PSTL1STRM);                                       \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  OP(rscratch1, result, incr);                                          \
  STXR(rscratch2, rscratch1, addr);                                     \
  cbnzw(rscratch2, retry_load);                                         \
  if (prev->is_valid() && prev != result) {                             \
    IOP(prev, rscratch1, incr);                                         \
  }                                                                     \
}

ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)

#undef ATOMIC_OP

#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    AOP(sz, newv, prev, addr);                                          \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  prfm(Address(addr), PSTL1STRM);                                       \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  STXR(rscratch1, newv, addr);                                          \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)

#undef ATOMIC_XCHG

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr(" pc = 0x%016" PRIx64, pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
      tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
      tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
      tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
      tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
      tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
      tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
      tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
      tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
      tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
      tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
      tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
      tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
      tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
      tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
      tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
      tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
      tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
      tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
      tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
      tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
      tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
      tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
      tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
      tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
      tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
      tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
      tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
      tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
      tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
      tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

RegSet MacroAssembler::call_clobbered_gp_registers() {
  RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
#ifndef R18_RESERVED
  regs += r18_tls;
#endif
  return regs;
}

void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
  int step = 4 *
wordSize; 3466 push(call_clobbered_gp_registers() - exclude, sp); 3467 sub(sp, sp, step); 3468 mov(rscratch1, -step); 3469 // Push v0-v7, v16-v31. 3470 for (int i = 31; i>= 4; i -= 4) { 3471 if (i <= v7->encoding() || i >= v16->encoding()) 3472 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3473 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3474 } 3475 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3476 as_FloatRegister(3), T1D, Address(sp)); 3477 } 3478 3479 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3480 for (int i = 0; i < 32; i += 4) { 3481 if (i <= v7->encoding() || i >= v16->encoding()) 3482 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3483 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3484 } 3485 3486 reinitialize_ptrue(); 3487 3488 pop(call_clobbered_gp_registers() - exclude, sp); 3489 } 3490 3491 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3492 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3493 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3494 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3495 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3496 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3497 sve_str(as_FloatRegister(i), Address(sp, i)); 3498 } 3499 } else { 3500 int step = (save_vectors ? 8 : 4) * wordSize; 3501 mov(rscratch1, -step); 3502 sub(sp, sp, step); 3503 for (int i = 28; i >= 4; i -= 4) { 3504 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3505 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3506 } 3507 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3508 } 3509 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3510 sub(sp, sp, total_predicate_in_bytes); 3511 for (int i = 0; i < PRegister::number_of_registers; i++) { 3512 sve_str(as_PRegister(i), Address(sp, i)); 3513 } 3514 } 3515 } 3516 3517 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3518 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3519 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3520 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3521 sve_ldr(as_PRegister(i), Address(sp, i)); 3522 } 3523 add(sp, sp, total_predicate_in_bytes); 3524 } 3525 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3526 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3527 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3528 } 3529 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3530 } else { 3531 int step = (restore_vectors ? 8 : 4) * wordSize; 3532 for (int i = 0; i <= 28; i += 4) 3533 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3534 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3535 } 3536 3537 // We may use predicate registers and rely on ptrue with SVE, 3538 // regardless of wide vector (> 8 bytes) used or not. 3539 if (use_sve) { 3540 reinitialize_ptrue(); 3541 } 3542 3543 // integer registers except lr & sp 3544 pop(RegSet::range(r0, r17), sp); 3545 #ifdef R18_RESERVED 3546 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3547 pop(RegSet::range(r20, r29), sp); 3548 #else 3549 pop(RegSet::range(r18_tls, r29), sp); 3550 #endif 3551 } 3552 3553 /** 3554 * Helpers for multiply_to_len(). 
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset).  "size" is the
// size of the operand.  The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1u<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}

Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
  assert(offset >= 0, "spill to negative address?");

  Register base = sp;

  // An immediate offset in the range 0 to 255 which is multiplied
  // by the current vector or predicate register size in bytes.
  if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
    return Address(base, offset / sve_reg_size_in_bytes);
  }

  add(tmp, base, offset);
  return Address(tmp);
}

// Checks whether the offset is aligned.
// Returns true if it is, else false.
bool MacroAssembler::merge_alignment_check(Register base,
                                           size_t size,
                                           int64_t cur_offset,
                                           int64_t prev_offset) const {
  if (AvoidUnalignedAccesses) {
    if (base == sp) {
      // Checks whether the lower of the two offsets is aligned to a
      // register pair.
      int64_t pair_mask = size * 2 - 1;
      int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
      return (offset & pair_mask) == 0;
    } else { // If base is not sp, we can't guarantee the access is aligned.
      return false;
    }
  } else {
    int64_t mask = size - 1;
    // Load/store pair instructions only support element-size-aligned offsets.
    return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
  }
}

// Checks whether current and previous loads/stores can be merged.
// Returns true if it can be merged, else false.
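// For example (illustrative only, not generated here): two same-size
// loads from the same base at adjacent offsets,
//
//   ldr x1, [sp, #16]
//   ldr x2, [sp, #24]
//
// are candidates for merging into a single
//
//   ldp x1, x2, [sp, #16]
//
// whereas loads of different sizes, from different bases, or with a
// gap between the offsets are rejected by the checks below.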
bool MacroAssembler::ldst_can_merge(Register rt,
                                    const Address &adr,
                                    size_t cur_size_in_bytes,
                                    bool is_store) const {
  address prev = pc() - NativeInstruction::instruction_size;
  address last = code()->last_insn();

  if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
    return false;
  }

  if (adr.getMode() != Address::base_plus_offset || prev != last) {
    return false;
  }

  NativeLdSt* prev_ldst = NativeLdSt_at(prev);
  size_t prev_size_in_bytes = prev_ldst->size_in_bytes();

  assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
  assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");

  if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
    return false;
  }

  int64_t max_offset = 63 * prev_size_in_bytes;
  int64_t min_offset = -64 * prev_size_in_bytes;

  assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");

  // Only accesses with the same base register can be merged.
  if (adr.base() != prev_ldst->base()) {
    return false;
  }

  int64_t cur_offset = adr.offset();
  int64_t prev_offset = prev_ldst->offset();
  size_t diff = abs(cur_offset - prev_offset);
  if (diff != prev_size_in_bytes) {
    return false;
  }

  // The following cases cannot be merged:
  //   ldr x2, [x2, #8]
  //   ldr x3, [x2, #16]
  // or:
  //   ldr x2, [x3, #8]
  //   ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get a SIGILL.
  if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
    return false;
  }

  int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
  // The offset must be within the ldp/stp instruction's immediate range.
  if (low_offset > max_offset || low_offset < min_offset) {
    return false;
  }

  if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
    return true;
  }

  return false;
}

// Merge current load/store with previous load/store into ldp/stp.
void MacroAssembler::merge_ldst(Register rt,
                                const Address &adr,
                                size_t cur_size_in_bytes,
                                bool is_store) {

  assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");

  Register rt_low, rt_high;
  address prev = pc() - NativeInstruction::instruction_size;
  NativeLdSt* prev_ldst = NativeLdSt_at(prev);

  int64_t offset;

  if (adr.offset() < prev_ldst->offset()) {
    offset = adr.offset();
    rt_low = rt;
    rt_high = prev_ldst->target();
  } else {
    offset = prev_ldst->offset();
    rt_low = prev_ldst->target();
    rt_high = rt;
  }

  Address adr_p = Address(prev_ldst->base(), offset);
  // Overwrite the previously generated binary.
  code_section()->set_end(prev);

  const size_t sz = prev_ldst->size_in_bytes();
  assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
  if (!is_store) {
    BLOCK_COMMENT("merged ldr pair");
    if (sz == 8) {
      ldp(rt_low, rt_high, adr_p);
    } else {
      ldpw(rt_low, rt_high, adr_p);
    }
  } else {
    BLOCK_COMMENT("merged str pair");
    if (sz == 8) {
      stp(rt_low, rt_high, adr_p);
    } else {
      stpw(rt_low, rt_high, adr_p);
    }
  }
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions.  Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
3814 * 3815 */ 3816 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3817 Register carry, Register carry2, 3818 Register idx, Register jdx, 3819 Register yz_idx1, Register yz_idx2, 3820 Register tmp, Register tmp3, Register tmp4, 3821 Register tmp6, Register product_hi) { 3822 3823 // jlong carry, x[], y[], z[]; 3824 // int kdx = ystart+1; 3825 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3826 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3827 // jlong carry2 = (jlong)(tmp3 >>> 64); 3828 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3829 // carry = (jlong)(tmp4 >>> 64); 3830 // z[kdx+idx+1] = (jlong)tmp3; 3831 // z[kdx+idx] = (jlong)tmp4; 3832 // } 3833 // idx += 2; 3834 // if (idx > 0) { 3835 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3836 // z[kdx+idx] = (jlong)yz_idx1; 3837 // carry = (jlong)(yz_idx1 >>> 64); 3838 // } 3839 // 3840 3841 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3842 3843 lsrw(jdx, idx, 2); 3844 3845 bind(L_third_loop); 3846 3847 subsw(jdx, jdx, 1); 3848 br(Assembler::MI, L_third_loop_exit); 3849 subw(idx, idx, 4); 3850 3851 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3852 3853 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3854 3855 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3856 3857 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3858 ror(yz_idx2, yz_idx2, 32); 3859 3860 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3861 3862 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3863 umulh(tmp4, product_hi, yz_idx1); 3864 3865 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3866 ror(rscratch2, rscratch2, 32); 3867 3868 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3869 umulh(carry2, product_hi, yz_idx2); 3870 3871 // propagate sum of both multiplications into carry:tmp4:tmp3 3872 adds(tmp3, tmp3, carry); 3873 adc(tmp4, tmp4, zr); 3874 adds(tmp3, tmp3, rscratch1); 3875 adcs(tmp4, tmp4, tmp); 3876 adc(carry, carry2, zr); 3877 adds(tmp4, tmp4, rscratch2); 3878 adc(carry, carry, zr); 3879 3880 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3881 ror(tmp4, tmp4, 32); 3882 stp(tmp4, tmp3, Address(tmp6, 0)); 3883 3884 b(L_third_loop); 3885 bind (L_third_loop_exit); 3886 3887 andw (idx, idx, 0x3); 3888 cbz(idx, L_post_third_loop_done); 3889 3890 Label L_check_1; 3891 subsw(idx, idx, 2); 3892 br(Assembler::MI, L_check_1); 3893 3894 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3895 ldr(yz_idx1, Address(rscratch1, 0)); 3896 ror(yz_idx1, yz_idx1, 32); 3897 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3898 umulh(tmp4, product_hi, yz_idx1); 3899 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3900 ldr(yz_idx2, Address(rscratch1, 0)); 3901 ror(yz_idx2, yz_idx2, 32); 3902 3903 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3904 3905 ror(tmp3, tmp3, 32); 3906 str(tmp3, Address(rscratch1, 0)); 3907 3908 bind (L_check_1); 3909 3910 andw (idx, idx, 0x1); 3911 subsw(idx, idx, 1); 3912 br(Assembler::MI, L_post_third_loop_done); 3913 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3914 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3915 umulh(carry2, tmp4, product_hi); 3916 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3917 3918 add2_with_carry(carry2, tmp3, tmp4, carry); 3919 3920 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3921 
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: tmp0
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);       // idx = ylen;
  addw(kdx, xlen, ylen); // kdx = xlen+ylen;
  mov(carry, zr);        // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
4001 // 4002 // for (int i = xstart-1; i >= 0; i--) { // Second loop 4003 // carry = 0; 4004 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 4005 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 4006 // (z[k] & LONG_MASK) + carry; 4007 // z[k] = (int)product; 4008 // carry = product >>> 32; 4009 // } 4010 // z[i] = (int)carry; 4011 // } 4012 // 4013 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 4014 4015 const Register jdx = tmp1; 4016 4017 bind(L_second_loop); 4018 mov(carry, zr); // carry = 0; 4019 movw(jdx, ylen); // j = ystart+1 4020 4021 subsw(xstart, xstart, 1); // i = xstart-1; 4022 br(Assembler::MI, L_done); 4023 4024 str(z, Address(pre(sp, -4 * wordSize))); 4025 4026 Label L_last_x; 4027 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4028 subsw(xstart, xstart, 1); // i = xstart-1; 4029 br(Assembler::MI, L_last_x); 4030 4031 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4032 ldr(product_hi, Address(rscratch1)); 4033 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4034 4035 Label L_third_loop_prologue; 4036 bind(L_third_loop_prologue); 4037 4038 str(ylen, Address(sp, wordSize)); 4039 stp(x, xstart, Address(sp, 2 * wordSize)); 4040 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4041 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4042 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4043 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4044 4045 addw(tmp3, xlen, 1); 4046 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4047 subsw(tmp3, tmp3, 1); 4048 br(Assembler::MI, L_done); 4049 4050 lsr(carry, carry, 32); 4051 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4052 b(L_second_loop); 4053 4054 // Next infrequent code is moved outside loops. 4055 bind(L_last_x); 4056 ldrw(product_hi, Address(x, 0)); 4057 b(L_third_loop_prologue); 4058 4059 bind(L_done); 4060 } 4061 4062 // Code for BigInteger::mulAdd intrinsic 4063 // out = r0 4064 // in = r1 4065 // offset = r2 (already out.length-offset) 4066 // len = r3 4067 // k = r4 4068 // 4069 // pseudo code from java implementation: 4070 // carry = 0; 4071 // offset = out.length-offset - 1; 4072 // for (int j=len-1; j >= 0; j--) { 4073 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4074 // out[offset--] = (int)product; 4075 // carry = product >>> 32; 4076 // } 4077 // return (int)carry; 4078 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4079 Register len, Register k) { 4080 Label LOOP, END; 4081 // pre-loop 4082 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4083 csel(out, zr, out, Assembler::EQ); 4084 br(Assembler::EQ, END); 4085 add(in, in, len, LSL, 2); // in[j+1] address 4086 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4087 mov(out, zr); // used to keep carry now 4088 BIND(LOOP); 4089 ldrw(rscratch1, Address(pre(in, -4))); 4090 madd(rscratch1, rscratch1, k, out); 4091 ldrw(rscratch2, Address(pre(offset, -4))); 4092 add(rscratch1, rscratch1, rscratch2); 4093 strw(rscratch1, Address(offset)); 4094 lsr(out, rscratch1, 32); 4095 subs(len, len, 1); 4096 br(Assembler::NE, LOOP); 4097 BIND(END); 4098 } 4099 4100 /** 4101 * Emits code to update CRC-32 with a byte value according to constants in table 4102 * 4103 * @param [in,out]crc Register containing the crc. 
4104 * @param [in]val Register containing the byte to fold into the CRC. 4105 * @param [in]table Register containing the table of crc constants. 4106 * 4107 * uint32_t crc; 4108 * val = crc_table[(val ^ crc) & 0xFF]; 4109 * crc = val ^ (crc >> 8); 4110 * 4111 */ 4112 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4113 eor(val, val, crc); 4114 andr(val, val, 0xff); 4115 ldrw(val, Address(table, val, Address::lsl(2))); 4116 eor(crc, val, crc, Assembler::LSR, 8); 4117 } 4118 4119 /** 4120 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4121 * 4122 * @param [in,out]crc Register containing the crc. 4123 * @param [in]v Register containing the 32-bit to fold into the CRC. 4124 * @param [in]table0 Register containing table 0 of crc constants. 4125 * @param [in]table1 Register containing table 1 of crc constants. 4126 * @param [in]table2 Register containing table 2 of crc constants. 4127 * @param [in]table3 Register containing table 3 of crc constants. 4128 * 4129 * uint32_t crc; 4130 * v = crc ^ v 4131 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4132 * 4133 */ 4134 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4135 Register table0, Register table1, Register table2, Register table3, 4136 bool upper) { 4137 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4138 uxtb(tmp, v); 4139 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4140 ubfx(tmp, v, 8, 8); 4141 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4142 eor(crc, crc, tmp); 4143 ubfx(tmp, v, 16, 8); 4144 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4145 eor(crc, crc, tmp); 4146 ubfx(tmp, v, 24, 8); 4147 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4148 eor(crc, crc, tmp); 4149 } 4150 4151 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4152 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4153 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4154 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4155 4156 subs(tmp0, len, 384); 4157 mvnw(crc, crc); 4158 br(Assembler::GE, CRC_by128_pre); 4159 BIND(CRC_less128); 4160 subs(len, len, 32); 4161 br(Assembler::GE, CRC_by32_loop); 4162 BIND(CRC_less32); 4163 adds(len, len, 32 - 4); 4164 br(Assembler::GE, CRC_by4_loop); 4165 adds(len, len, 4); 4166 br(Assembler::GT, CRC_by1_loop); 4167 b(L_exit); 4168 4169 BIND(CRC_by32_loop); 4170 ldp(tmp0, tmp1, Address(buf)); 4171 crc32x(crc, crc, tmp0); 4172 ldp(tmp2, tmp3, Address(buf, 16)); 4173 crc32x(crc, crc, tmp1); 4174 add(buf, buf, 32); 4175 crc32x(crc, crc, tmp2); 4176 subs(len, len, 32); 4177 crc32x(crc, crc, tmp3); 4178 br(Assembler::GE, CRC_by32_loop); 4179 cmn(len, (u1)32); 4180 br(Assembler::NE, CRC_less32); 4181 b(L_exit); 4182 4183 BIND(CRC_by4_loop); 4184 ldrw(tmp0, Address(post(buf, 4))); 4185 subs(len, len, 4); 4186 crc32w(crc, crc, tmp0); 4187 br(Assembler::GE, CRC_by4_loop); 4188 adds(len, len, 4); 4189 br(Assembler::LE, L_exit); 4190 BIND(CRC_by1_loop); 4191 ldrb(tmp0, Address(post(buf, 1))); 4192 subs(len, len, 1); 4193 crc32b(crc, crc, tmp0); 4194 br(Assembler::GT, CRC_by1_loop); 4195 b(L_exit); 4196 4197 BIND(CRC_by128_pre); 4198 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4199 4*256*sizeof(juint) + 8*sizeof(juint)); 4200 mov(crc, 0); 4201 crc32x(crc, crc, tmp0); 4202 crc32x(crc, crc, tmp1); 4203 4204 cbnz(len, CRC_less128); 4205 4206 BIND(L_exit); 
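  // The crc is kept bit-inverted while folding (see the mvnw on entry
  // above); this final mvnw inverts it back before returning.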
4207 mvnw(crc, crc); 4208 } 4209 4210 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4211 Register len, Register tmp0, Register tmp1, Register tmp2, 4212 Register tmp3) { 4213 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4214 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4215 4216 mvnw(crc, crc); 4217 4218 subs(len, len, 128); 4219 br(Assembler::GE, CRC_by64_pre); 4220 BIND(CRC_less64); 4221 adds(len, len, 128-32); 4222 br(Assembler::GE, CRC_by32_loop); 4223 BIND(CRC_less32); 4224 adds(len, len, 32-4); 4225 br(Assembler::GE, CRC_by4_loop); 4226 adds(len, len, 4); 4227 br(Assembler::GT, CRC_by1_loop); 4228 b(L_exit); 4229 4230 BIND(CRC_by32_loop); 4231 ldp(tmp0, tmp1, Address(post(buf, 16))); 4232 subs(len, len, 32); 4233 crc32x(crc, crc, tmp0); 4234 ldr(tmp2, Address(post(buf, 8))); 4235 crc32x(crc, crc, tmp1); 4236 ldr(tmp3, Address(post(buf, 8))); 4237 crc32x(crc, crc, tmp2); 4238 crc32x(crc, crc, tmp3); 4239 br(Assembler::GE, CRC_by32_loop); 4240 cmn(len, (u1)32); 4241 br(Assembler::NE, CRC_less32); 4242 b(L_exit); 4243 4244 BIND(CRC_by4_loop); 4245 ldrw(tmp0, Address(post(buf, 4))); 4246 subs(len, len, 4); 4247 crc32w(crc, crc, tmp0); 4248 br(Assembler::GE, CRC_by4_loop); 4249 adds(len, len, 4); 4250 br(Assembler::LE, L_exit); 4251 BIND(CRC_by1_loop); 4252 ldrb(tmp0, Address(post(buf, 1))); 4253 subs(len, len, 1); 4254 crc32b(crc, crc, tmp0); 4255 br(Assembler::GT, CRC_by1_loop); 4256 b(L_exit); 4257 4258 BIND(CRC_by64_pre); 4259 sub(buf, buf, 8); 4260 ldp(tmp0, tmp1, Address(buf, 8)); 4261 crc32x(crc, crc, tmp0); 4262 ldr(tmp2, Address(buf, 24)); 4263 crc32x(crc, crc, tmp1); 4264 ldr(tmp3, Address(buf, 32)); 4265 crc32x(crc, crc, tmp2); 4266 ldr(tmp0, Address(buf, 40)); 4267 crc32x(crc, crc, tmp3); 4268 ldr(tmp1, Address(buf, 48)); 4269 crc32x(crc, crc, tmp0); 4270 ldr(tmp2, Address(buf, 56)); 4271 crc32x(crc, crc, tmp1); 4272 ldr(tmp3, Address(pre(buf, 64))); 4273 4274 b(CRC_by64_loop); 4275 4276 align(CodeEntryAlignment); 4277 BIND(CRC_by64_loop); 4278 subs(len, len, 64); 4279 crc32x(crc, crc, tmp2); 4280 ldr(tmp0, Address(buf, 8)); 4281 crc32x(crc, crc, tmp3); 4282 ldr(tmp1, Address(buf, 16)); 4283 crc32x(crc, crc, tmp0); 4284 ldr(tmp2, Address(buf, 24)); 4285 crc32x(crc, crc, tmp1); 4286 ldr(tmp3, Address(buf, 32)); 4287 crc32x(crc, crc, tmp2); 4288 ldr(tmp0, Address(buf, 40)); 4289 crc32x(crc, crc, tmp3); 4290 ldr(tmp1, Address(buf, 48)); 4291 crc32x(crc, crc, tmp0); 4292 ldr(tmp2, Address(buf, 56)); 4293 crc32x(crc, crc, tmp1); 4294 ldr(tmp3, Address(pre(buf, 64))); 4295 br(Assembler::GE, CRC_by64_loop); 4296 4297 // post-loop 4298 crc32x(crc, crc, tmp2); 4299 crc32x(crc, crc, tmp3); 4300 4301 sub(len, len, 64); 4302 add(buf, buf, 8); 4303 cmn(len, (u1)128); 4304 br(Assembler::NE, CRC_less64); 4305 BIND(L_exit); 4306 mvnw(crc, crc); 4307 } 4308 4309 /** 4310 * @param crc register containing existing CRC (32-bit) 4311 * @param buf register pointing to input byte buffer (byte*) 4312 * @param len register containing number of bytes 4313 * @param table register that will contain address of CRC table 4314 * @param tmp scratch register 4315 */ 4316 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4317 Register table0, Register table1, Register table2, Register table3, 4318 Register tmp, Register tmp2, Register tmp3) { 4319 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4320 4321 if (UseCryptoPmullForCRC32) { 4322 
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4323 return; 4324 } 4325 4326 if (UseCRC32) { 4327 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4328 return; 4329 } 4330 4331 mvnw(crc, crc); 4332 4333 { 4334 uint64_t offset; 4335 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4336 add(table0, table0, offset); 4337 } 4338 add(table1, table0, 1*256*sizeof(juint)); 4339 add(table2, table0, 2*256*sizeof(juint)); 4340 add(table3, table0, 3*256*sizeof(juint)); 4341 4342 { // Neon code start 4343 cmp(len, (u1)64); 4344 br(Assembler::LT, L_by16); 4345 eor(v16, T16B, v16, v16); 4346 4347 Label L_fold; 4348 4349 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4350 4351 ld1(v0, v1, T2D, post(buf, 32)); 4352 ld1r(v4, T2D, post(tmp, 8)); 4353 ld1r(v5, T2D, post(tmp, 8)); 4354 ld1r(v6, T2D, post(tmp, 8)); 4355 ld1r(v7, T2D, post(tmp, 8)); 4356 mov(v16, S, 0, crc); 4357 4358 eor(v0, T16B, v0, v16); 4359 sub(len, len, 64); 4360 4361 BIND(L_fold); 4362 pmull(v22, T8H, v0, v5, T8B); 4363 pmull(v20, T8H, v0, v7, T8B); 4364 pmull(v23, T8H, v0, v4, T8B); 4365 pmull(v21, T8H, v0, v6, T8B); 4366 4367 pmull2(v18, T8H, v0, v5, T16B); 4368 pmull2(v16, T8H, v0, v7, T16B); 4369 pmull2(v19, T8H, v0, v4, T16B); 4370 pmull2(v17, T8H, v0, v6, T16B); 4371 4372 uzp1(v24, T8H, v20, v22); 4373 uzp2(v25, T8H, v20, v22); 4374 eor(v20, T16B, v24, v25); 4375 4376 uzp1(v26, T8H, v16, v18); 4377 uzp2(v27, T8H, v16, v18); 4378 eor(v16, T16B, v26, v27); 4379 4380 ushll2(v22, T4S, v20, T8H, 8); 4381 ushll(v20, T4S, v20, T4H, 8); 4382 4383 ushll2(v18, T4S, v16, T8H, 8); 4384 ushll(v16, T4S, v16, T4H, 8); 4385 4386 eor(v22, T16B, v23, v22); 4387 eor(v18, T16B, v19, v18); 4388 eor(v20, T16B, v21, v20); 4389 eor(v16, T16B, v17, v16); 4390 4391 uzp1(v17, T2D, v16, v20); 4392 uzp2(v21, T2D, v16, v20); 4393 eor(v17, T16B, v17, v21); 4394 4395 ushll2(v20, T2D, v17, T4S, 16); 4396 ushll(v16, T2D, v17, T2S, 16); 4397 4398 eor(v20, T16B, v20, v22); 4399 eor(v16, T16B, v16, v18); 4400 4401 uzp1(v17, T2D, v20, v16); 4402 uzp2(v21, T2D, v20, v16); 4403 eor(v28, T16B, v17, v21); 4404 4405 pmull(v22, T8H, v1, v5, T8B); 4406 pmull(v20, T8H, v1, v7, T8B); 4407 pmull(v23, T8H, v1, v4, T8B); 4408 pmull(v21, T8H, v1, v6, T8B); 4409 4410 pmull2(v18, T8H, v1, v5, T16B); 4411 pmull2(v16, T8H, v1, v7, T16B); 4412 pmull2(v19, T8H, v1, v4, T16B); 4413 pmull2(v17, T8H, v1, v6, T16B); 4414 4415 ld1(v0, v1, T2D, post(buf, 32)); 4416 4417 uzp1(v24, T8H, v20, v22); 4418 uzp2(v25, T8H, v20, v22); 4419 eor(v20, T16B, v24, v25); 4420 4421 uzp1(v26, T8H, v16, v18); 4422 uzp2(v27, T8H, v16, v18); 4423 eor(v16, T16B, v26, v27); 4424 4425 ushll2(v22, T4S, v20, T8H, 8); 4426 ushll(v20, T4S, v20, T4H, 8); 4427 4428 ushll2(v18, T4S, v16, T8H, 8); 4429 ushll(v16, T4S, v16, T4H, 8); 4430 4431 eor(v22, T16B, v23, v22); 4432 eor(v18, T16B, v19, v18); 4433 eor(v20, T16B, v21, v20); 4434 eor(v16, T16B, v17, v16); 4435 4436 uzp1(v17, T2D, v16, v20); 4437 uzp2(v21, T2D, v16, v20); 4438 eor(v16, T16B, v17, v21); 4439 4440 ushll2(v20, T2D, v16, T4S, 16); 4441 ushll(v16, T2D, v16, T2S, 16); 4442 4443 eor(v20, T16B, v22, v20); 4444 eor(v16, T16B, v16, v18); 4445 4446 uzp1(v17, T2D, v20, v16); 4447 uzp2(v21, T2D, v20, v16); 4448 eor(v20, T16B, v17, v21); 4449 4450 shl(v16, T2D, v28, 1); 4451 shl(v17, T2D, v20, 1); 4452 4453 eor(v0, T16B, v0, v16); 4454 eor(v1, T16B, v1, v17); 4455 4456 subs(len, len, 32); 4457 br(Assembler::GE, L_fold); 4458 4459 mov(crc, 0); 4460 mov(tmp, v0, D, 0); 
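    // Reduce the two 128-bit Neon accumulators v0 and v1 to a scalar
    // crc: each 64-bit lane is folded in via two table-driven word
    // updates (low 32 bits, then high 32 bits).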
4461 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4462 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4463 mov(tmp, v0, D, 1); 4464 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4465 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4466 mov(tmp, v1, D, 0); 4467 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4468 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4469 mov(tmp, v1, D, 1); 4470 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4471 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4472 4473 add(len, len, 32); 4474 } // Neon code end 4475 4476 BIND(L_by16); 4477 subs(len, len, 16); 4478 br(Assembler::GE, L_by16_loop); 4479 adds(len, len, 16-4); 4480 br(Assembler::GE, L_by4_loop); 4481 adds(len, len, 4); 4482 br(Assembler::GT, L_by1_loop); 4483 b(L_exit); 4484 4485 BIND(L_by4_loop); 4486 ldrw(tmp, Address(post(buf, 4))); 4487 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4488 subs(len, len, 4); 4489 br(Assembler::GE, L_by4_loop); 4490 adds(len, len, 4); 4491 br(Assembler::LE, L_exit); 4492 BIND(L_by1_loop); 4493 subs(len, len, 1); 4494 ldrb(tmp, Address(post(buf, 1))); 4495 update_byte_crc32(crc, tmp, table0); 4496 br(Assembler::GT, L_by1_loop); 4497 b(L_exit); 4498 4499 align(CodeEntryAlignment); 4500 BIND(L_by16_loop); 4501 subs(len, len, 16); 4502 ldp(tmp, tmp3, Address(post(buf, 16))); 4503 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4504 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4505 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4506 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4507 br(Assembler::GE, L_by16_loop); 4508 adds(len, len, 16-4); 4509 br(Assembler::GE, L_by4_loop); 4510 adds(len, len, 4); 4511 br(Assembler::GT, L_by1_loop); 4512 BIND(L_exit); 4513 mvnw(crc, crc); 4514 } 4515 4516 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4517 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4518 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4519 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4520 4521 subs(tmp0, len, 384); 4522 br(Assembler::GE, CRC_by128_pre); 4523 BIND(CRC_less128); 4524 subs(len, len, 32); 4525 br(Assembler::GE, CRC_by32_loop); 4526 BIND(CRC_less32); 4527 adds(len, len, 32 - 4); 4528 br(Assembler::GE, CRC_by4_loop); 4529 adds(len, len, 4); 4530 br(Assembler::GT, CRC_by1_loop); 4531 b(L_exit); 4532 4533 BIND(CRC_by32_loop); 4534 ldp(tmp0, tmp1, Address(buf)); 4535 crc32cx(crc, crc, tmp0); 4536 ldr(tmp2, Address(buf, 16)); 4537 crc32cx(crc, crc, tmp1); 4538 ldr(tmp3, Address(buf, 24)); 4539 crc32cx(crc, crc, tmp2); 4540 add(buf, buf, 32); 4541 subs(len, len, 32); 4542 crc32cx(crc, crc, tmp3); 4543 br(Assembler::GE, CRC_by32_loop); 4544 cmn(len, (u1)32); 4545 br(Assembler::NE, CRC_less32); 4546 b(L_exit); 4547 4548 BIND(CRC_by4_loop); 4549 ldrw(tmp0, Address(post(buf, 4))); 4550 subs(len, len, 4); 4551 crc32cw(crc, crc, tmp0); 4552 br(Assembler::GE, CRC_by4_loop); 4553 adds(len, len, 4); 4554 br(Assembler::LE, L_exit); 4555 BIND(CRC_by1_loop); 4556 ldrb(tmp0, Address(post(buf, 1))); 4557 subs(len, len, 1); 4558 crc32cb(crc, crc, tmp0); 4559 br(Assembler::GT, CRC_by1_loop); 4560 b(L_exit); 4561 
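  // Bulk path for large buffers: fold 128 bytes per iteration using
  // carry-less multiplies; the 128-bit remainder comes back in
  // tmp0:tmp1 and is folded into the crc with two crc32cx steps.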
4562 BIND(CRC_by128_pre); 4563 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4564 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4565 mov(crc, 0); 4566 crc32cx(crc, crc, tmp0); 4567 crc32cx(crc, crc, tmp1); 4568 4569 cbnz(len, CRC_less128); 4570 4571 BIND(L_exit); 4572 } 4573 4574 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4575 Register len, Register tmp0, Register tmp1, Register tmp2, 4576 Register tmp3) { 4577 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4578 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4579 4580 subs(len, len, 128); 4581 br(Assembler::GE, CRC_by64_pre); 4582 BIND(CRC_less64); 4583 adds(len, len, 128-32); 4584 br(Assembler::GE, CRC_by32_loop); 4585 BIND(CRC_less32); 4586 adds(len, len, 32-4); 4587 br(Assembler::GE, CRC_by4_loop); 4588 adds(len, len, 4); 4589 br(Assembler::GT, CRC_by1_loop); 4590 b(L_exit); 4591 4592 BIND(CRC_by32_loop); 4593 ldp(tmp0, tmp1, Address(post(buf, 16))); 4594 subs(len, len, 32); 4595 crc32cx(crc, crc, tmp0); 4596 ldr(tmp2, Address(post(buf, 8))); 4597 crc32cx(crc, crc, tmp1); 4598 ldr(tmp3, Address(post(buf, 8))); 4599 crc32cx(crc, crc, tmp2); 4600 crc32cx(crc, crc, tmp3); 4601 br(Assembler::GE, CRC_by32_loop); 4602 cmn(len, (u1)32); 4603 br(Assembler::NE, CRC_less32); 4604 b(L_exit); 4605 4606 BIND(CRC_by4_loop); 4607 ldrw(tmp0, Address(post(buf, 4))); 4608 subs(len, len, 4); 4609 crc32cw(crc, crc, tmp0); 4610 br(Assembler::GE, CRC_by4_loop); 4611 adds(len, len, 4); 4612 br(Assembler::LE, L_exit); 4613 BIND(CRC_by1_loop); 4614 ldrb(tmp0, Address(post(buf, 1))); 4615 subs(len, len, 1); 4616 crc32cb(crc, crc, tmp0); 4617 br(Assembler::GT, CRC_by1_loop); 4618 b(L_exit); 4619 4620 BIND(CRC_by64_pre); 4621 sub(buf, buf, 8); 4622 ldp(tmp0, tmp1, Address(buf, 8)); 4623 crc32cx(crc, crc, tmp0); 4624 ldr(tmp2, Address(buf, 24)); 4625 crc32cx(crc, crc, tmp1); 4626 ldr(tmp3, Address(buf, 32)); 4627 crc32cx(crc, crc, tmp2); 4628 ldr(tmp0, Address(buf, 40)); 4629 crc32cx(crc, crc, tmp3); 4630 ldr(tmp1, Address(buf, 48)); 4631 crc32cx(crc, crc, tmp0); 4632 ldr(tmp2, Address(buf, 56)); 4633 crc32cx(crc, crc, tmp1); 4634 ldr(tmp3, Address(pre(buf, 64))); 4635 4636 b(CRC_by64_loop); 4637 4638 align(CodeEntryAlignment); 4639 BIND(CRC_by64_loop); 4640 subs(len, len, 64); 4641 crc32cx(crc, crc, tmp2); 4642 ldr(tmp0, Address(buf, 8)); 4643 crc32cx(crc, crc, tmp3); 4644 ldr(tmp1, Address(buf, 16)); 4645 crc32cx(crc, crc, tmp0); 4646 ldr(tmp2, Address(buf, 24)); 4647 crc32cx(crc, crc, tmp1); 4648 ldr(tmp3, Address(buf, 32)); 4649 crc32cx(crc, crc, tmp2); 4650 ldr(tmp0, Address(buf, 40)); 4651 crc32cx(crc, crc, tmp3); 4652 ldr(tmp1, Address(buf, 48)); 4653 crc32cx(crc, crc, tmp0); 4654 ldr(tmp2, Address(buf, 56)); 4655 crc32cx(crc, crc, tmp1); 4656 ldr(tmp3, Address(pre(buf, 64))); 4657 br(Assembler::GE, CRC_by64_loop); 4658 4659 // post-loop 4660 crc32cx(crc, crc, tmp2); 4661 crc32cx(crc, crc, tmp3); 4662 4663 sub(len, len, 64); 4664 add(buf, buf, 8); 4665 cmn(len, (u1)128); 4666 br(Assembler::NE, CRC_less64); 4667 BIND(L_exit); 4668 } 4669 4670 /** 4671 * @param crc register containing existing CRC (32-bit) 4672 * @param buf register pointing to input byte buffer (byte*) 4673 * @param len register containing number of bytes 4674 * @param table register that will contain address of CRC table 4675 * @param tmp scratch register 4676 */ 4677 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4678 Register table0, Register table1, Register table2, Register table3, 4679 Register tmp, Register tmp2, Register tmp3) { 4680 if (UseCryptoPmullForCRC32) { 4681 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4682 } else { 4683 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4684 } 4685 } 4686 4687 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4688 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4689 Label CRC_by128_loop; 4690 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4691 4692 sub(len, len, 256); 4693 Register table = tmp0; 4694 { 4695 uint64_t offset; 4696 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4697 add(table, table, offset); 4698 } 4699 add(table, table, table_offset); 4700 4701 // Registers v0..v7 are used as data registers. 4702 // Registers v16..v31 are used as tmp registers. 4703 sub(buf, buf, 0x10); 4704 ldrq(v0, Address(buf, 0x10)); 4705 ldrq(v1, Address(buf, 0x20)); 4706 ldrq(v2, Address(buf, 0x30)); 4707 ldrq(v3, Address(buf, 0x40)); 4708 ldrq(v4, Address(buf, 0x50)); 4709 ldrq(v5, Address(buf, 0x60)); 4710 ldrq(v6, Address(buf, 0x70)); 4711 ldrq(v7, Address(pre(buf, 0x80))); 4712 4713 movi(v31, T4S, 0); 4714 mov(v31, S, 0, crc); 4715 eor(v0, T16B, v0, v31); 4716 4717 // Register v16 contains constants from the crc table. 4718 ldrq(v16, Address(table)); 4719 b(CRC_by128_loop); 4720 4721 align(OptoLoopAlignment); 4722 BIND(CRC_by128_loop); 4723 pmull (v17, T1Q, v0, v16, T1D); 4724 pmull2(v18, T1Q, v0, v16, T2D); 4725 ldrq(v0, Address(buf, 0x10)); 4726 eor3(v0, T16B, v17, v18, v0); 4727 4728 pmull (v19, T1Q, v1, v16, T1D); 4729 pmull2(v20, T1Q, v1, v16, T2D); 4730 ldrq(v1, Address(buf, 0x20)); 4731 eor3(v1, T16B, v19, v20, v1); 4732 4733 pmull (v21, T1Q, v2, v16, T1D); 4734 pmull2(v22, T1Q, v2, v16, T2D); 4735 ldrq(v2, Address(buf, 0x30)); 4736 eor3(v2, T16B, v21, v22, v2); 4737 4738 pmull (v23, T1Q, v3, v16, T1D); 4739 pmull2(v24, T1Q, v3, v16, T2D); 4740 ldrq(v3, Address(buf, 0x40)); 4741 eor3(v3, T16B, v23, v24, v3); 4742 4743 pmull (v25, T1Q, v4, v16, T1D); 4744 pmull2(v26, T1Q, v4, v16, T2D); 4745 ldrq(v4, Address(buf, 0x50)); 4746 eor3(v4, T16B, v25, v26, v4); 4747 4748 pmull (v27, T1Q, v5, v16, T1D); 4749 pmull2(v28, T1Q, v5, v16, T2D); 4750 ldrq(v5, Address(buf, 0x60)); 4751 eor3(v5, T16B, v27, v28, v5); 4752 4753 pmull (v29, T1Q, v6, v16, T1D); 4754 pmull2(v30, T1Q, v6, v16, T2D); 4755 ldrq(v6, Address(buf, 0x70)); 4756 eor3(v6, T16B, v29, v30, v6); 4757 4758 // Reuse registers v23, v24. 4759 // Using them won't block the first instruction of the next iteration. 4760 pmull (v23, T1Q, v7, v16, T1D); 4761 pmull2(v24, T1Q, v7, v16, T2D); 4762 ldrq(v7, Address(pre(buf, 0x80))); 4763 eor3(v7, T16B, v23, v24, v7); 4764 4765 subs(len, len, 0x80); 4766 br(Assembler::GE, CRC_by128_loop); 4767 4768 // fold into 512 bits 4769 // Use v31 for constants because v16 can be still in use. 
4770 ldrq(v31, Address(table, 0x10)); 4771 4772 pmull (v17, T1Q, v0, v31, T1D); 4773 pmull2(v18, T1Q, v0, v31, T2D); 4774 eor3(v0, T16B, v17, v18, v4); 4775 4776 pmull (v19, T1Q, v1, v31, T1D); 4777 pmull2(v20, T1Q, v1, v31, T2D); 4778 eor3(v1, T16B, v19, v20, v5); 4779 4780 pmull (v21, T1Q, v2, v31, T1D); 4781 pmull2(v22, T1Q, v2, v31, T2D); 4782 eor3(v2, T16B, v21, v22, v6); 4783 4784 pmull (v23, T1Q, v3, v31, T1D); 4785 pmull2(v24, T1Q, v3, v31, T2D); 4786 eor3(v3, T16B, v23, v24, v7); 4787 4788 // fold into 128 bits 4789 // Use v17 for constants because v31 can be still in use. 4790 ldrq(v17, Address(table, 0x20)); 4791 pmull (v25, T1Q, v0, v17, T1D); 4792 pmull2(v26, T1Q, v0, v17, T2D); 4793 eor3(v3, T16B, v3, v25, v26); 4794 4795 // Use v18 for constants because v17 can be still in use. 4796 ldrq(v18, Address(table, 0x30)); 4797 pmull (v27, T1Q, v1, v18, T1D); 4798 pmull2(v28, T1Q, v1, v18, T2D); 4799 eor3(v3, T16B, v3, v27, v28); 4800 4801 // Use v19 for constants because v18 can be still in use. 4802 ldrq(v19, Address(table, 0x40)); 4803 pmull (v29, T1Q, v2, v19, T1D); 4804 pmull2(v30, T1Q, v2, v19, T2D); 4805 eor3(v0, T16B, v3, v29, v30); 4806 4807 add(len, len, 0x80); 4808 add(buf, buf, 0x10); 4809 4810 mov(tmp0, v0, D, 0); 4811 mov(tmp1, v0, D, 1); 4812 } 4813 4814 SkipIfEqual::SkipIfEqual( 4815 MacroAssembler* masm, const bool* flag_addr, bool value) { 4816 _masm = masm; 4817 uint64_t offset; 4818 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4819 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4820 if (value) { 4821 _masm->cbnzw(rscratch1, _label); 4822 } else { 4823 _masm->cbzw(rscratch1, _label); 4824 } 4825 } 4826 4827 SkipIfEqual::~SkipIfEqual() { 4828 _masm->bind(_label); 4829 } 4830 4831 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4832 Address adr; 4833 switch(dst.getMode()) { 4834 case Address::base_plus_offset: 4835 // This is the expected mode, although we allow all the other 4836 // forms below. 
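    // form_address may or may not actually use rscratch2: when the
    // offset is encodable it returns base_plus_offset directly, and
    // otherwise it materializes part of the offset in rscratch2.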
4837 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4838 break; 4839 default: 4840 lea(rscratch2, dst); 4841 adr = Address(rscratch2); 4842 break; 4843 } 4844 ldr(rscratch1, adr); 4845 add(rscratch1, rscratch1, src); 4846 str(rscratch1, adr); 4847 } 4848 4849 void MacroAssembler::cmpptr(Register src1, Address src2) { 4850 uint64_t offset; 4851 adrp(rscratch1, src2, offset); 4852 ldr(rscratch1, Address(rscratch1, offset)); 4853 cmp(src1, rscratch1); 4854 } 4855 4856 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4857 cmp(obj1, obj2); 4858 } 4859 4860 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4861 load_method_holder(rresult, rmethod); 4862 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4863 } 4864 4865 void MacroAssembler::load_method_holder(Register holder, Register method) { 4866 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4867 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4868 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4869 } 4870 4871 void MacroAssembler::load_klass(Register dst, Register src) { 4872 if (UseCompressedClassPointers) { 4873 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4874 decode_klass_not_null(dst); 4875 } else { 4876 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4877 } 4878 } 4879 4880 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 4881 if (RestoreMXCSROnJNICalls) { 4882 Label OK; 4883 get_fpcr(tmp1); 4884 mov(tmp2, tmp1); 4885 // Set FPCR to the state we need. We do want Round to Nearest. We 4886 // don't want non-IEEE rounding modes or floating-point traps. 4887 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 4888 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 4889 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 4890 eor(tmp2, tmp1, tmp2); 4891 cbz(tmp2, OK); // Only reset FPCR if it's wrong 4892 set_fpcr(tmp1); 4893 bind(OK); 4894 } 4895 } 4896 4897 // ((OopHandle)result).resolve(); 4898 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4899 // OopHandle::resolve is an indirection. 4900 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4901 } 4902 4903 // ((WeakHandle)result).resolve(); 4904 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4905 assert_different_registers(result, tmp1, tmp2); 4906 Label resolved; 4907 4908 // A null weak handle resolves to null. 4909 cbz(result, resolved); 4910 4911 // Only 64 bit platforms support GCs that require a tmp register 4912 // WeakHandle::resolve is an indirection like jweak. 
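  // ON_PHANTOM_OOP_REF gives this load jweak (phantom reference)
  // strength, so it does not keep the referent strongly alive.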
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  if (UseCompressedClassPointers) {
    ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    if (CompressedKlassPointers::base() == nullptr) {
      cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release?  Concurrent GCs assume
  // klass length is valid if the klass field is not null.
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to the klass gap in the destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(s, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /*  Old algorithm: is this any worse?
4983 Label nonnull; 4984 cbnz(r, nonnull); 4985 sub(r, r, rheapbase); 4986 bind(nonnull); 4987 lsr(r, r, LogMinObjAlignmentInBytes); 4988 */ 4989 } 4990 } 4991 4992 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4993 #ifdef ASSERT 4994 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4995 if (CheckCompressedOops) { 4996 Label ok; 4997 cbnz(r, ok); 4998 stop("null oop passed to encode_heap_oop_not_null"); 4999 bind(ok); 5000 } 5001 #endif 5002 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5003 if (CompressedOops::base() != nullptr) { 5004 sub(r, r, rheapbase); 5005 } 5006 if (CompressedOops::shift() != 0) { 5007 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5008 lsr(r, r, LogMinObjAlignmentInBytes); 5009 } 5010 } 5011 5012 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5013 #ifdef ASSERT 5014 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5015 if (CheckCompressedOops) { 5016 Label ok; 5017 cbnz(src, ok); 5018 stop("null oop passed to encode_heap_oop_not_null2"); 5019 bind(ok); 5020 } 5021 #endif 5022 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5023 5024 Register data = src; 5025 if (CompressedOops::base() != nullptr) { 5026 sub(dst, src, rheapbase); 5027 data = dst; 5028 } 5029 if (CompressedOops::shift() != 0) { 5030 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5031 lsr(dst, data, LogMinObjAlignmentInBytes); 5032 data = dst; 5033 } 5034 if (data == src) 5035 mov(dst, src); 5036 } 5037 5038 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5039 #ifdef ASSERT 5040 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5041 #endif 5042 if (CompressedOops::base() == nullptr) { 5043 if (CompressedOops::shift() != 0 || d != s) { 5044 lsl(d, s, CompressedOops::shift()); 5045 } 5046 } else { 5047 Label done; 5048 if (d != s) 5049 mov(d, s); 5050 cbz(s, done); 5051 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5052 bind(done); 5053 } 5054 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5055 } 5056 5057 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5058 assert (UseCompressedOops, "should only be used for compressed headers"); 5059 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5060 // Cannot assert, unverified entry point counts instructions (see .ad file) 5061 // vtableStubs also counts instructions in pd_code_size_limit. 5062 // Also do not verify_oop as this is called by verify_oop. 5063 if (CompressedOops::shift() != 0) { 5064 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5065 if (CompressedOops::base() != nullptr) { 5066 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5067 } else { 5068 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5069 } 5070 } else { 5071 assert (CompressedOops::base() == nullptr, "sanity"); 5072 } 5073 } 5074 5075 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5076 assert (UseCompressedOops, "should only be used for compressed headers"); 5077 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5078 // Cannot assert, unverified entry point counts instructions (see .ad file) 5079 // vtableStubs also counts instructions in pd_code_size_limit. 5080 // Also do not verify_oop as this is called by verify_oop. 
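  // With a heap base in use: dst = rheapbase + (src << shift);
  // without one, just the shift (or a plain move when shift == 0).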
5081 if (CompressedOops::shift() != 0) { 5082 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5083 if (CompressedOops::base() != nullptr) { 5084 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5085 } else { 5086 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5087 } 5088 } else { 5089 assert (CompressedOops::base() == nullptr, "sanity"); 5090 if (dst != src) { 5091 mov(dst, src); 5092 } 5093 } 5094 } 5095 5096 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5097 5098 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5099 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5100 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5101 5102 if (_klass_decode_mode != KlassDecodeNone) { 5103 return _klass_decode_mode; 5104 } 5105 5106 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 5107 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 5108 5109 if (CompressedKlassPointers::base() == nullptr) { 5110 return (_klass_decode_mode = KlassDecodeZero); 5111 } 5112 5113 if (operand_valid_for_logical_immediate( 5114 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 5115 const uint64_t range_mask = 5116 (1ULL << log2i(CompressedKlassPointers::range())) - 1; 5117 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 5118 return (_klass_decode_mode = KlassDecodeXor); 5119 } 5120 } 5121 5122 const uint64_t shifted_base = 5123 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5124 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 5125 "compressed class base bad alignment"); 5126 5127 return (_klass_decode_mode = KlassDecodeMovk); 5128 } 5129 5130 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5131 switch (klass_decode_mode()) { 5132 case KlassDecodeZero: 5133 if (CompressedKlassPointers::shift() != 0) { 5134 lsr(dst, src, LogKlassAlignmentInBytes); 5135 } else { 5136 if (dst != src) mov(dst, src); 5137 } 5138 break; 5139 5140 case KlassDecodeXor: 5141 if (CompressedKlassPointers::shift() != 0) { 5142 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5143 lsr(dst, dst, LogKlassAlignmentInBytes); 5144 } else { 5145 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5146 } 5147 break; 5148 5149 case KlassDecodeMovk: 5150 if (CompressedKlassPointers::shift() != 0) { 5151 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 5152 } else { 5153 movw(dst, src); 5154 } 5155 break; 5156 5157 case KlassDecodeNone: 5158 ShouldNotReachHere(); 5159 break; 5160 } 5161 } 5162 5163 void MacroAssembler::encode_klass_not_null(Register r) { 5164 encode_klass_not_null(r, r); 5165 } 5166 5167 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5168 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5169 5170 switch (klass_decode_mode()) { 5171 case KlassDecodeZero: 5172 if (CompressedKlassPointers::shift() != 0) { 5173 lsl(dst, src, LogKlassAlignmentInBytes); 5174 } else { 5175 if (dst != src) mov(dst, src); 5176 } 5177 break; 5178 5179 case KlassDecodeXor: 5180 if (CompressedKlassPointers::shift() != 0) { 5181 lsl(dst, src, LogKlassAlignmentInBytes); 5182 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5183 } else { 5184 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5185 } 5186 break; 5187 5188 case KlassDecodeMovk: { 5189 const uint64_t shifted_base = 5190 
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5191 5192 if (dst != src) movw(dst, src); 5193 movk(dst, shifted_base >> 32, 32); 5194 5195 if (CompressedKlassPointers::shift() != 0) { 5196 lsl(dst, dst, LogKlassAlignmentInBytes); 5197 } 5198 5199 break; 5200 } 5201 5202 case KlassDecodeNone: 5203 ShouldNotReachHere(); 5204 break; 5205 } 5206 } 5207 5208 void MacroAssembler::decode_klass_not_null(Register r) { 5209 decode_klass_not_null(r, r); 5210 } 5211 5212 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5213 #ifdef ASSERT 5214 { 5215 ThreadInVMfromUnknown tiv; 5216 assert (UseCompressedOops, "should only be used for compressed oops"); 5217 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5218 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5219 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5220 } 5221 #endif 5222 int oop_index = oop_recorder()->find_index(obj); 5223 InstructionMark im(this); 5224 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5225 code_section()->relocate(inst_mark(), rspec); 5226 movz(dst, 0xDEAD, 16); 5227 movk(dst, 0xBEEF); 5228 } 5229 5230 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5231 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5232 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5233 int index = oop_recorder()->find_index(k); 5234 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5235 5236 InstructionMark im(this); 5237 RelocationHolder rspec = metadata_Relocation::spec(index); 5238 code_section()->relocate(inst_mark(), rspec); 5239 narrowKlass nk = CompressedKlassPointers::encode(k); 5240 movz(dst, (nk >> 16), 16); 5241 movk(dst, nk & 0xffff); 5242 } 5243 5244 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5245 Register dst, Address src, 5246 Register tmp1, Register tmp2) { 5247 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5248 decorators = AccessInternal::decorator_fixup(decorators, type); 5249 bool as_raw = (decorators & AS_RAW) != 0; 5250 if (as_raw) { 5251 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5252 } else { 5253 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5254 } 5255 } 5256 5257 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5258 Address dst, Register val, 5259 Register tmp1, Register tmp2, Register tmp3) { 5260 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5261 decorators = AccessInternal::decorator_fixup(decorators, type); 5262 bool as_raw = (decorators & AS_RAW) != 0; 5263 if (as_raw) { 5264 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5265 } else { 5266 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5267 } 5268 } 5269 5270 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5271 Register tmp2, DecoratorSet decorators) { 5272 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5273 } 5274 5275 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5276 Register tmp2, DecoratorSet decorators) { 5277 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5278 } 5279 5280 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5281 Register tmp2, Register 
tmp3, DecoratorSet decorators) { 5282 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5283 } 5284 5285 // Used for storing nulls. 5286 void MacroAssembler::store_heap_oop_null(Address dst) { 5287 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5288 } 5289 5290 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5291 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5292 int index = oop_recorder()->allocate_metadata_index(obj); 5293 RelocationHolder rspec = metadata_Relocation::spec(index); 5294 return Address((address)obj, rspec); 5295 } 5296 5297 // Move an oop into a register. 5298 void MacroAssembler::movoop(Register dst, jobject obj) { 5299 int oop_index; 5300 if (obj == nullptr) { 5301 oop_index = oop_recorder()->allocate_oop_index(obj); 5302 } else { 5303 #ifdef ASSERT 5304 { 5305 ThreadInVMfromUnknown tiv; 5306 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5307 } 5308 #endif 5309 oop_index = oop_recorder()->find_index(obj); 5310 } 5311 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5312 5313 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5314 mov(dst, Address((address)obj, rspec)); 5315 } else { 5316 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5317 ldr_constant(dst, Address(dummy, rspec)); 5318 } 5319 5320 } 5321 5322 // Move a metadata address into a register. 5323 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5324 int oop_index; 5325 if (obj == nullptr) { 5326 oop_index = oop_recorder()->allocate_metadata_index(obj); 5327 } else { 5328 oop_index = oop_recorder()->find_index(obj); 5329 } 5330 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5331 mov(dst, Address((address)obj, rspec)); 5332 } 5333 5334 Address MacroAssembler::constant_oop_address(jobject obj) { 5335 #ifdef ASSERT 5336 { 5337 ThreadInVMfromUnknown tiv; 5338 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5339 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5340 } 5341 #endif 5342 int oop_index = oop_recorder()->find_index(obj); 5343 return Address((address)obj, oop_Relocation::spec(oop_index)); 5344 } 5345 5346 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
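// The code is emitted by the GC's BarrierSetAssembler. On the common
// bump-the-pointer TLAB fast path it behaves roughly like this sketch
// (names illustrative, not the emitted instructions):
//   HeapWord* obj = thread->tlab().top();
//   HeapWord* end = obj + size_in_words;
//   if (end > thread->tlab().end()) goto slow_case;
//   thread->tlab().set_top(end);   // obj now points at the new object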
5347 void MacroAssembler::tlab_allocate(Register obj, 5348 Register var_size_in_bytes, 5349 int con_size_in_bytes, 5350 Register t1, 5351 Register t2, 5352 Label& slow_case) { 5353 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5354 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5355 } 5356 5357 void MacroAssembler::verify_tlab() { 5358 #ifdef ASSERT 5359 if (UseTLAB && VerifyOops) { 5360 Label next, ok; 5361 5362 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5363 5364 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5365 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5366 cmp(rscratch2, rscratch1); 5367 br(Assembler::HS, next); 5368 STOP("assert(top >= start)"); 5369 should_not_reach_here(); 5370 5371 bind(next); 5372 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5373 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5374 cmp(rscratch2, rscratch1); 5375 br(Assembler::HS, ok); 5376 STOP("assert(top <= end)"); 5377 should_not_reach_here(); 5378 5379 bind(ok); 5380 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5381 } 5382 #endif 5383 } 5384 5385 // Writes to successive stack pages until the offset is reached, to check 5386 // for stack overflow + shadow pages. This clobbers tmp. 5387 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5388 assert_different_registers(tmp, size, rscratch1); 5389 mov(tmp, sp); 5390 // Bang stack for total size given plus shadow page size. 5391 // Bang one page at a time because large size can bang beyond yellow and 5392 // red zones. 5393 Label loop; 5394 mov(rscratch1, (int)os::vm_page_size()); 5395 bind(loop); 5396 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5397 subsw(size, size, rscratch1); 5398 str(size, Address(tmp)); 5399 br(Assembler::GT, loop); 5400 5401 // Bang down shadow pages too. 5402 // At this point, (tmp-0) is the last address touched, so don't 5403 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5404 // was post-decremented.) Skip this address by starting at i=1, and 5405 // touch a few more pages below. N.B. It is important to touch all 5406 // the way down to and including i=StackShadowPages. 5407 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 5408 // this could be any sized move but this can be a debugging crumb 5409 // so the bigger the better. 5410 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5411 str(size, Address(tmp)); 5412 } 5413 } 5414 5415 // Move the address of the polling page into dest. 5416 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5417 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5418 } 5419 5420 // Read the polling page. The address of the polling page must 5421 // already be in r.
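// To arm a safepoint the VM switches the per-thread polling word to a
// page that is not readable, so the ldrw below traps and the signal
// handler brings the thread to the safepoint; while disarmed the load
// is just a harmless read into zr.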
5422 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5423 address mark; 5424 { 5425 InstructionMark im(this); 5426 code_section()->relocate(inst_mark(), rtype); 5427 ldrw(zr, Address(r, 0)); 5428 mark = inst_mark(); 5429 } 5430 verify_cross_modify_fence_not_required(); 5431 return mark; 5432 } 5433 5434 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5435 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5436 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5437 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5438 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5439 int64_t offset_low = dest_page - low_page; 5440 int64_t offset_high = dest_page - high_page; 5441 5442 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5443 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5444 5445 InstructionMark im(this); 5446 code_section()->relocate(inst_mark(), dest.rspec()); 5447 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5448 // the code cache so that if it is relocated we know it will still reach 5449 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5450 _adrp(reg1, dest.target()); 5451 } else { 5452 uint64_t target = (uint64_t)dest.target(); 5453 uint64_t adrp_target 5454 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5455 5456 _adrp(reg1, (address)adrp_target); 5457 movk(reg1, target >> 32, 32); 5458 } 5459 byte_offset = (uint64_t)dest.target() & 0xfff; 5460 } 5461 5462 void MacroAssembler::load_byte_map_base(Register reg) { 5463 CardTable::CardValue* byte_map_base = 5464 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5465 5466 // Strictly speaking the byte_map_base isn't an address at all, and it might 5467 // even be negative. It is thus materialised as a constant. 
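  // For context: a post-write card-table barrier dirties the card for a
  // store address roughly as in this sketch (card_shift is typically 9,
  // i.e. 512-byte cards; illustrative, not the emitted barrier):
  //   byte_map_base[(uintptr_t)store_addr >> card_shift] = dirty_card;
  // byte_map_base is pre-biased so that this indexing works even though
  // it need not itself point into the card table.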
5468 if (SCCache::is_on_for_write()) { 5469 // SCA needs relocation info for card table base 5470 lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base))); 5471 } else { 5472 mov(reg, (uint64_t)byte_map_base); 5473 } 5474 } 5475 5476 void MacroAssembler::build_frame(int framesize) { 5477 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5478 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5479 protect_return_address(); 5480 if (framesize < ((1 << 9) + 2 * wordSize)) { 5481 sub(sp, sp, framesize); 5482 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5483 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 5484 } else { 5485 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 5486 if (PreserveFramePointer) mov(rfp, sp); 5487 if (framesize < ((1 << 12) + 2 * wordSize)) 5488 sub(sp, sp, framesize - 2 * wordSize); 5489 else { 5490 mov(rscratch1, framesize - 2 * wordSize); 5491 sub(sp, sp, rscratch1); 5492 } 5493 } 5494 verify_cross_modify_fence_not_required(); 5495 } 5496 5497 void MacroAssembler::remove_frame(int framesize) { 5498 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5499 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5500 if (framesize < ((1 << 9) + 2 * wordSize)) { 5501 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5502 add(sp, sp, framesize); 5503 } else { 5504 if (framesize < ((1 << 12) + 2 * wordSize)) 5505 add(sp, sp, framesize - 2 * wordSize); 5506 else { 5507 mov(rscratch1, framesize - 2 * wordSize); 5508 add(sp, sp, rscratch1); 5509 } 5510 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 5511 } 5512 authenticate_return_address(); 5513 } 5514 5515 5516 // This method counts leading positive bytes (highest bit not set) in the provided byte array 5517 address MacroAssembler::count_positives(Register ary1, Register len, Register result) { 5518 // The simple and most common case, a small aligned array that is not at 5519 // the end of a memory page, is handled here; all other cases are in the stub. 5520 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE; 5521 const uint64_t UPPER_BIT_MASK=0x8080808080808080; 5522 assert_different_registers(ary1, len, result); 5523 5524 mov(result, len); 5525 cmpw(len, 0); 5526 br(LE, DONE); 5527 cmpw(len, 4 * wordSize); 5528 br(GE, STUB_LONG); // if size >= 32 go to the stub 5529 5530 int shift = 64 - exact_log2(os::vm_page_size()); 5531 lsl(rscratch1, ary1, shift); 5532 mov(rscratch2, (size_t)(4 * wordSize) << shift); 5533 adds(rscratch2, rscratch1, rscratch2); // At end of page?
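  // The lsl above moved ary1's in-page offset into the top bits of
  // rscratch1, so the adds overflows (sets the carry flag) exactly when
  // offset + 4 * wordSize reaches or crosses the page boundary, i.e. a
  // conservative check that the 32-byte read below could touch the next
  // page.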
5534 br(CS, STUB); // if at the end of the page go to the stub 5535 subs(len, len, wordSize); 5536 br(LT, END); 5537 5538 BIND(LOOP); 5539 ldr(rscratch1, Address(post(ary1, wordSize))); 5540 tst(rscratch1, UPPER_BIT_MASK); 5541 br(NE, SET_RESULT); 5542 subs(len, len, wordSize); 5543 br(GE, LOOP); 5544 cmpw(len, -wordSize); 5545 br(EQ, DONE); 5546 5547 BIND(END); 5548 ldr(rscratch1, Address(ary1)); 5549 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5550 lslv(rscratch1, rscratch1, rscratch2); 5551 tst(rscratch1, UPPER_BIT_MASK); 5552 br(NE, SET_RESULT); 5553 b(DONE); 5554 5555 BIND(STUB); 5556 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5557 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5558 address tpc1 = trampoline_call(count_pos); 5559 if (tpc1 == nullptr) { 5560 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5561 postcond(pc() == badAddress); 5562 return nullptr; 5563 } 5564 b(DONE); 5565 5566 BIND(STUB_LONG); 5567 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5568 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5569 address tpc2 = trampoline_call(count_pos_long); 5570 if (tpc2 == nullptr) { 5571 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5572 postcond(pc() == badAddress); 5573 return nullptr; 5574 } 5575 b(DONE); 5576 5577 BIND(SET_RESULT); 5578 5579 add(len, len, wordSize); 5580 sub(result, result, len); 5581 5582 BIND(DONE); 5583 postcond(pc() != badAddress); 5584 return pc(); 5585 } 5586 5587 // Clobbers: rscratch1, rscratch2, rflags 5588 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5589 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5590 Register tmp4, Register tmp5, Register result, 5591 Register cnt1, int elem_size) { 5592 Label DONE, SAME; 5593 Register tmp1 = rscratch1; 5594 Register tmp2 = rscratch2; 5595 int elem_per_word = wordSize/elem_size; 5596 int log_elem_size = exact_log2(elem_size); 5597 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5598 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5599 int base_offset 5600 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5601 // When the length offset is not aligned to 8 bytes, we align it down. 5602 // This is valid because the aligned-down offset then starts at the 5603 // klass field, which is the same for both arrays when their element 5604 // type is the same. 5605 int start_offset = align_down(length_offset, BytesPerWord); 5606 int extra_length = base_offset - start_offset; 5607 assert(start_offset == length_offset || start_offset == klass_offset, 5608 "start offset must be 8-byte-aligned or be the klass offset"); 5609 assert(base_offset != start_offset, "must include the length field"); 5610 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5611 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5612 5613 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5614 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5615 5616 #ifndef PRODUCT 5617 { 5618 const char kind = (elem_size == 2) ? 'U' : 'L'; 5619 char comment[64]; 5620 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5621 BLOCK_COMMENT(comment); 5622 } 5623 #endif 5624 5625 // if (a1 == a2) 5626 // return true; 5627 cmpoop(a1, a2); // May have read barriers for a1 and a2.
5628 br(EQ, SAME); 5629 5630 if (UseSimpleArrayEquals) { 5631 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5632 // if (a1 == nullptr || a2 == nullptr) 5633 // return false; 5634 // (a1 & a2) == 0 means that at least one pointer is null (or, very 5635 // rarely, that two non-null pointers share no set bits), so we can 5636 // save one branch in most cases 5637 tst(a1, a2); 5638 mov(result, false); 5639 br(EQ, A_MIGHT_BE_NULL); 5640 // if (a1.length != a2.length) 5641 // return false; 5642 bind(A_IS_NOT_NULL); 5643 ldrw(cnt1, Address(a1, length_offset)); 5644 // Increase loop counter by diff between base- and actual start-offset. 5645 addw(cnt1, cnt1, extra_length); 5646 lea(a1, Address(a1, start_offset)); 5647 lea(a2, Address(a2, start_offset)); 5648 // Check for short strings, i.e. smaller than wordSize. 5649 subs(cnt1, cnt1, elem_per_word); 5650 br(Assembler::LT, SHORT); 5651 // Main 8 byte comparison loop. 5652 bind(NEXT_WORD); { 5653 ldr(tmp1, Address(post(a1, wordSize))); 5654 ldr(tmp2, Address(post(a2, wordSize))); 5655 subs(cnt1, cnt1, elem_per_word); 5656 eor(tmp5, tmp1, tmp2); 5657 cbnz(tmp5, DONE); 5658 } br(GT, NEXT_WORD); 5659 // Last longword. In the case where length == 4 we compare the 5660 // same longword twice, but that's still faster than another 5661 // conditional branch. 5662 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5663 // length == 4. 5664 if (log_elem_size > 0) 5665 lsl(cnt1, cnt1, log_elem_size); 5666 ldr(tmp3, Address(a1, cnt1)); 5667 ldr(tmp4, Address(a2, cnt1)); 5668 eor(tmp5, tmp3, tmp4); 5669 cbnz(tmp5, DONE); 5670 b(SAME); 5671 bind(A_MIGHT_BE_NULL); 5672 // if both a1 and a2 are non-null, proceed with the loads 5673 cbz(a1, DONE); 5674 cbz(a2, DONE); 5675 b(A_IS_NOT_NULL); 5676 bind(SHORT); 5677 5678 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5679 { 5680 ldrw(tmp1, Address(post(a1, 4))); 5681 ldrw(tmp2, Address(post(a2, 4))); 5682 eorw(tmp5, tmp1, tmp2); 5683 cbnzw(tmp5, DONE); 5684 } 5685 bind(TAIL03); 5686 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5687 { 5688 ldrh(tmp3, Address(post(a1, 2))); 5689 ldrh(tmp4, Address(post(a2, 2))); 5690 eorw(tmp5, tmp3, tmp4); 5691 cbnzw(tmp5, DONE); 5692 } 5693 bind(TAIL01); 5694 if (elem_size == 1) { // Only needed when comparing byte arrays. 5695 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5696 { 5697 ldrb(tmp1, a1); 5698 ldrb(tmp2, a2); 5699 eorw(tmp5, tmp1, tmp2); 5700 cbnzw(tmp5, DONE); 5701 } 5702 } 5703 } else { 5704 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5705 CSET_EQ, LAST_CHECK; 5706 mov(result, false); 5707 cbz(a1, DONE); 5708 ldrw(cnt1, Address(a1, length_offset)); 5709 cbz(a2, DONE); 5710 // Increase loop counter by diff between base- and actual start-offset.
5711 addw(cnt1, cnt1, extra_length); 5712 5713 // On most CPUs a2 is, surprisingly, still "locked" by the preceding ldrw, 5714 // so it is faster to take another branch before comparing a1 and a2 5715 cmp(cnt1, (u1)elem_per_word); 5716 br(LE, SHORT); // short or same 5717 ldr(tmp3, Address(pre(a1, start_offset))); 5718 subs(zr, cnt1, stubBytesThreshold); 5719 br(GE, STUB); 5720 ldr(tmp4, Address(pre(a2, start_offset))); 5721 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5722 5723 // Main 16 byte comparison loop with 2 exits 5724 bind(NEXT_DWORD); { 5725 ldr(tmp1, Address(pre(a1, wordSize))); 5726 ldr(tmp2, Address(pre(a2, wordSize))); 5727 subs(cnt1, cnt1, 2 * elem_per_word); 5728 br(LE, TAIL); 5729 eor(tmp4, tmp3, tmp4); 5730 cbnz(tmp4, DONE); 5731 ldr(tmp3, Address(pre(a1, wordSize))); 5732 ldr(tmp4, Address(pre(a2, wordSize))); 5733 cmp(cnt1, (u1)elem_per_word); 5734 br(LE, TAIL2); 5735 cmp(tmp1, tmp2); 5736 } br(EQ, NEXT_DWORD); 5737 b(DONE); 5738 5739 bind(TAIL); 5740 eor(tmp4, tmp3, tmp4); 5741 eor(tmp2, tmp1, tmp2); 5742 lslv(tmp2, tmp2, tmp5); 5743 orr(tmp5, tmp4, tmp2); 5744 cmp(tmp5, zr); 5745 b(CSET_EQ); 5746 5747 bind(TAIL2); 5748 eor(tmp2, tmp1, tmp2); 5749 cbnz(tmp2, DONE); 5750 b(LAST_CHECK); 5751 5752 bind(STUB); 5753 ldr(tmp4, Address(pre(a2, start_offset))); 5754 if (elem_size == 2) { // convert to byte counter 5755 lsl(cnt1, cnt1, 1); 5756 } 5757 eor(tmp5, tmp3, tmp4); 5758 cbnz(tmp5, DONE); 5759 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 5760 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 5761 address tpc = trampoline_call(stub); 5762 if (tpc == nullptr) { 5763 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 5764 postcond(pc() == badAddress); 5765 return nullptr; 5766 } 5767 b(DONE); 5768 5769 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 5770 // so, if a2 == null => return false(0), else return true, so we can return a2 5771 mov(result, a2); 5772 b(DONE); 5773 bind(SHORT); 5774 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5775 ldr(tmp3, Address(a1, start_offset)); 5776 ldr(tmp4, Address(a2, start_offset)); 5777 bind(LAST_CHECK); 5778 eor(tmp4, tmp3, tmp4); 5779 lslv(tmp5, tmp4, tmp5); 5780 cmp(tmp5, zr); 5781 bind(CSET_EQ); 5782 cset(result, EQ); 5783 b(DONE); 5784 } 5785 5786 bind(SAME); 5787 mov(result, true); 5788 // That's it. 5789 bind(DONE); 5790 5791 BLOCK_COMMENT("} array_equals"); 5792 postcond(pc() != badAddress); 5793 return pc(); 5794 } 5795 5796 // Compare Strings 5797 5798 // For Strings we're passed the address of the first characters in a1 5799 // and a2 and the length in cnt1. 5800 // There are two implementations. For arrays >= 8 bytes, all 5801 // comparisons (including the final one, which may overlap) are 5802 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 5803 // word, then a halfword, and then a byte. 5804 5805 void MacroAssembler::string_equals(Register a1, Register a2, 5806 Register result, Register cnt1) 5807 { 5808 Label SAME, DONE, SHORT, NEXT_WORD; 5809 Register tmp1 = rscratch1; 5810 Register tmp2 = rscratch2; 5811 Register cnt2 = tmp2; // cnt2 only used in array length compare 5812 5813 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5814 5815 #ifndef PRODUCT 5816 { 5817 char comment[64]; 5818 snprintf(comment, sizeof comment, "{string_equalsL"); 5819 BLOCK_COMMENT(comment); 5820 } 5821 #endif 5822 5823 mov(result, false); 5824 5825 // Check for short strings, i.e. smaller than wordSize.
5826 subs(cnt1, cnt1, wordSize); 5827 br(Assembler::LT, SHORT); 5828 // Main 8 byte comparison loop. 5829 bind(NEXT_WORD); { 5830 ldr(tmp1, Address(post(a1, wordSize))); 5831 ldr(tmp2, Address(post(a2, wordSize))); 5832 subs(cnt1, cnt1, wordSize); 5833 eor(tmp1, tmp1, tmp2); 5834 cbnz(tmp1, DONE); 5835 } br(GT, NEXT_WORD); 5836 // Last longword. In the case where length == 4 we compare the 5837 // same longword twice, but that's still faster than another 5838 // conditional branch. 5839 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5840 // length == 4. 5841 ldr(tmp1, Address(a1, cnt1)); 5842 ldr(tmp2, Address(a2, cnt1)); 5843 eor(tmp2, tmp1, tmp2); 5844 cbnz(tmp2, DONE); 5845 b(SAME); 5846 5847 bind(SHORT); 5848 Label TAIL03, TAIL01; 5849 5850 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5851 { 5852 ldrw(tmp1, Address(post(a1, 4))); 5853 ldrw(tmp2, Address(post(a2, 4))); 5854 eorw(tmp1, tmp1, tmp2); 5855 cbnzw(tmp1, DONE); 5856 } 5857 bind(TAIL03); 5858 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5859 { 5860 ldrh(tmp1, Address(post(a1, 2))); 5861 ldrh(tmp2, Address(post(a2, 2))); 5862 eorw(tmp1, tmp1, tmp2); 5863 cbnzw(tmp1, DONE); 5864 } 5865 bind(TAIL01); 5866 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5867 { 5868 ldrb(tmp1, a1); 5869 ldrb(tmp2, a2); 5870 eorw(tmp1, tmp1, tmp2); 5871 cbnzw(tmp1, DONE); 5872 } 5873 // Arrays are equal. 5874 bind(SAME); 5875 mov(result, true); 5876 5877 // That's it. 5878 bind(DONE); 5879 BLOCK_COMMENT("} string_equals"); 5880 } 5881 5882 5883 // The size of the blocks erased by the zero_blocks stub. We must 5884 // handle anything smaller than this ourselves in zero_words(). 5885 const int MacroAssembler::zero_words_block_size = 8; 5886 5887 // zero_words() is used by C2 ClearArray patterns and by 5888 // C1_MacroAssembler. It is as small as possible, handling small word 5889 // counts locally and delegating anything larger to the zero_blocks 5890 // stub. It is expanded many times in compiled code, so it is 5891 // important to keep it short. 5892 5893 // ptr: Address of a buffer to be zeroed. 5894 // cnt: Count in HeapWords. 5895 // 5896 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5897 address MacroAssembler::zero_words(Register ptr, Register cnt) 5898 { 5899 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5900 5901 BLOCK_COMMENT("zero_words {"); 5902 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5903 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5904 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5905 5906 subs(rscratch1, cnt, zero_words_block_size); 5907 Label around; 5908 br(LO, around); 5909 { 5912 // Make sure this is a C2 compilation. C1 allocates space only for 5913 // trampoline stubs generated by Call LIR ops, and in any case it 5914 // makes sense for a C1 compilation task to proceed as quickly as 5915 // possible.
5916 CompileTask* task; 5917 if (StubRoutines::aarch64::complete() 5918 && Thread::current()->is_Compiler_thread() 5919 && (task = ciEnv::current()->task()) 5920 && is_c2_compile(task->comp_level())) { 5921 address tpc = trampoline_call(zero_blocks); 5922 if (tpc == nullptr) { 5923 DEBUG_ONLY(reset_labels(around)); 5924 return nullptr; 5925 } 5926 } else { 5927 far_call(zero_blocks); 5928 } 5929 } 5930 bind(around); 5931 5932 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5933 // for us. 5934 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5935 Label l; 5936 tbz(cnt, exact_log2(i), l); 5937 for (int j = 0; j < i; j += 2) { 5938 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5939 } 5940 bind(l); 5941 } 5942 { 5943 Label l; 5944 tbz(cnt, 0, l); 5945 str(zr, Address(ptr)); 5946 bind(l); 5947 } 5948 5949 BLOCK_COMMENT("} zero_words"); 5950 return pc(); 5951 } 5952 5953 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5954 // cnt: Immediate count in HeapWords. 5955 // 5956 // r10, r11, rscratch1, and rscratch2 are clobbered. 5957 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5958 { 5959 assert(wordSize <= BlockZeroingLowLimit, 5960 "increase BlockZeroingLowLimit"); 5961 address result = nullptr; 5962 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5963 #ifndef PRODUCT 5964 { 5965 char buf[64]; 5966 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5967 BLOCK_COMMENT(buf); 5968 } 5969 #endif 5970 if (cnt >= 16) { 5971 uint64_t loops = cnt/16; 5972 if (loops > 1) { 5973 mov(rscratch2, loops - 1); 5974 } 5975 { 5976 Label loop; 5977 bind(loop); 5978 for (int i = 0; i < 16; i += 2) { 5979 stp(zr, zr, Address(base, i * BytesPerWord)); 5980 } 5981 add(base, base, 16 * BytesPerWord); 5982 if (loops > 1) { 5983 subs(rscratch2, rscratch2, 1); 5984 br(GE, loop); 5985 } 5986 } 5987 } 5988 cnt %= 16; 5989 int i = cnt & 1; // store any odd word to start 5990 if (i) str(zr, Address(base)); 5991 for (; i < (int)cnt; i += 2) { 5992 stp(zr, zr, Address(base, i * wordSize)); 5993 } 5994 BLOCK_COMMENT("} zero_words"); 5995 result = pc(); 5996 } else { 5997 mov(r10, base); mov(r11, cnt); 5998 result = zero_words(r10, r11); 5999 } 6000 return result; 6001 } 6002 6003 // Zero blocks of memory by using DC ZVA. 6004 // 6005 // Aligns the base address first sufficiently for DC ZVA, then uses 6006 // DC ZVA repeatedly for every full block. cnt is the size to be 6007 // zeroed in HeapWords. Returns the count of words left to be zeroed 6008 // in cnt. 6009 // 6010 // NOTE: This is intended to be used in the zero_blocks() stub. If 6011 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 6012 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 6013 Register tmp = rscratch1; 6014 Register tmp2 = rscratch2; 6015 int zva_length = VM_Version::zva_length(); 6016 Label initial_table_end, loop_zva; 6017 Label fini; 6018 6019 // Base must be 16-byte aligned. If not, just return and let the caller handle it 6020 tst(base, 0x0f); 6021 br(Assembler::NE, fini); 6022 // Align base with ZVA length. 6023 neg(tmp, base); 6024 andr(tmp, tmp, zva_length - 1); 6025 6026 // tmp: the number of bytes to be filled to align the base with ZVA length.
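  // What follows branches into a table of stp instructions: each stp
  // zeroes 16 bytes and occupies 4 bytes of code, so stepping back
  // tmp >> 2 bytes (i.e. tmp / 16 instructions) from initial_table_end
  // executes exactly enough stores to cover the tmp alignment bytes,
  // while cnt is reduced by tmp / 8 words to match.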
6027 add(base, base, tmp); 6028 sub(cnt, cnt, tmp, Assembler::ASR, 3); 6029 adr(tmp2, initial_table_end); 6030 sub(tmp2, tmp2, tmp, Assembler::LSR, 2); 6031 br(tmp2); 6032 6033 for (int i = -zva_length + 16; i < 0; i += 16) 6034 stp(zr, zr, Address(base, i)); 6035 bind(initial_table_end); 6036 6037 sub(cnt, cnt, zva_length >> 3); 6038 bind(loop_zva); 6039 dc(Assembler::ZVA, base); 6040 subs(cnt, cnt, zva_length >> 3); 6041 add(base, base, zva_length); 6042 br(Assembler::GE, loop_zva); 6043 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA 6044 bind(fini); 6045 } 6046 6047 // base: Address of a buffer to be filled, 8 bytes aligned. 6048 // cnt: Count in 8-byte units. 6049 // value: Value to fill the buffer with. 6050 // base will point to the end of the buffer after filling. 6051 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 6052 { 6053 // Algorithm: 6054 // 6055 // if (cnt == 0) { 6056 // return; 6057 // } 6058 // if ((p & 8) != 0) { 6059 // *p++ = v; 6060 // } 6061 // 6062 // scratch1 = cnt & 14; 6063 // cnt -= scratch1; 6064 // p += scratch1; 6065 // switch (scratch1 / 2) { 6066 // do { 6067 // cnt -= 16; 6068 // p[-16] = v; 6069 // p[-15] = v; 6070 // case 7: 6071 // p[-14] = v; 6072 // p[-13] = v; 6073 // case 6: 6074 // p[-12] = v; 6075 // p[-11] = v; 6076 // // ... 6077 // case 1: 6078 // p[-2] = v; 6079 // p[-1] = v; 6080 // case 0: 6081 // p += 16; 6082 // } while (cnt); 6083 // } 6084 // if ((cnt & 1) == 1) { 6085 // *p++ = v; 6086 // } 6087 6088 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 6089 6090 Label fini, skip, entry, loop; 6091 const int unroll = 8; // Number of stp instructions we'll unroll 6092 6093 cbz(cnt, fini); 6094 tbz(base, 3, skip); 6095 str(value, Address(post(base, 8))); 6096 sub(cnt, cnt, 1); 6097 bind(skip); 6098 6099 andr(rscratch1, cnt, (unroll-1) * 2); 6100 sub(cnt, cnt, rscratch1); 6101 add(base, base, rscratch1, Assembler::LSL, 3); 6102 adr(rscratch2, entry); 6103 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 6104 br(rscratch2); 6105 6106 bind(loop); 6107 add(base, base, unroll * 16); 6108 for (int i = -unroll; i < 0; i++) 6109 stp(value, value, Address(base, i * 16)); 6110 bind(entry); 6111 subs(cnt, cnt, unroll * 2); 6112 br(Assembler::GE, loop); 6113 6114 tbz(cnt, 0, fini); 6115 str(value, Address(post(base, 8))); 6116 bind(fini); 6117 } 6118 6119 // Intrinsic for 6120 // 6121 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray 6122 // return the number of characters copied. 6123 // - java/lang/StringUTF16.compress 6124 // return index of non-latin1 character if copy fails, otherwise 'len'. 6125 // 6126 // This version always returns the number of characters copied, and does not 6127 // clobber the 'len' register. A successful copy will complete with the post- 6128 // condition: 'res' == 'len', while an unsuccessful copy will exit with the 6129 // post-condition: 0 <= 'res' < 'len'. 6130 // 6131 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to 6132 // degrade performance (on Ampere Altra - Neoverse N1), to an unacceptable 6133 // extent, even though the footprint would be smaller. 6134 // Using 'umaxv' in the ASCII-case comes with a small penalty but does 6135 // avoid additional bloat.
6136 // 6137 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6138 void MacroAssembler::encode_iso_array(Register src, Register dst, 6139 Register len, Register res, bool ascii, 6140 FloatRegister vtmp0, FloatRegister vtmp1, 6141 FloatRegister vtmp2, FloatRegister vtmp3, 6142 FloatRegister vtmp4, FloatRegister vtmp5) 6143 { 6144 Register cnt = res; 6145 Register max = rscratch1; 6146 Register chk = rscratch2; 6147 6148 prfm(Address(src), PLDL1STRM); 6149 movw(cnt, len); 6150 6151 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6152 6153 Label LOOP_32, DONE_32, FAIL_32; 6154 6155 BIND(LOOP_32); 6156 { 6157 cmpw(cnt, 32); 6158 br(LT, DONE_32); 6159 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6160 // Extract lower bytes. 6161 FloatRegister vlo0 = vtmp4; 6162 FloatRegister vlo1 = vtmp5; 6163 uzp1(vlo0, T16B, vtmp0, vtmp1); 6164 uzp1(vlo1, T16B, vtmp2, vtmp3); 6165 // Merge bits... 6166 orr(vtmp0, T16B, vtmp0, vtmp1); 6167 orr(vtmp2, T16B, vtmp2, vtmp3); 6168 // Extract merged upper bytes. 6169 FloatRegister vhix = vtmp0; 6170 uzp2(vhix, T16B, vtmp0, vtmp2); 6171 // ISO-check on hi-parts (all zero). 6172 // ASCII-check on lo-parts (no sign). 6173 FloatRegister vlox = vtmp1; // Merge lower bytes. 6174 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6175 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6176 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6177 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6178 ASCII(orr(chk, chk, max)); 6179 cbnz(chk, FAIL_32); 6180 subw(cnt, cnt, 32); 6181 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6182 b(LOOP_32); 6183 } 6184 BIND(FAIL_32); 6185 sub(src, src, 64); 6186 BIND(DONE_32); 6187 6188 Label LOOP_8, SKIP_8; 6189 6190 BIND(LOOP_8); 6191 { 6192 cmpw(cnt, 8); 6193 br(LT, SKIP_8); 6194 FloatRegister vhi = vtmp0; 6195 FloatRegister vlo = vtmp1; 6196 ld1(vtmp3, T8H, src); 6197 uzp1(vlo, T16B, vtmp3, vtmp3); 6198 uzp2(vhi, T16B, vtmp3, vtmp3); 6199 // ISO-check on hi-parts (all zero). 6200 // ASCII-check on lo-parts (no sign). 6201 ASCII(cm(LT, vtmp2, T16B, vlo)); 6202 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6203 ASCII(umov(max, vtmp2, B, 0)); 6204 ASCII(orr(chk, chk, max)); 6205 cbnz(chk, SKIP_8); 6206 6207 strd(vlo, Address(post(dst, 8))); 6208 subw(cnt, cnt, 8); 6209 add(src, src, 16); 6210 b(LOOP_8); 6211 } 6212 BIND(SKIP_8); 6213 6214 #undef ASCII 6215 6216 Label LOOP, DONE; 6217 6218 cbz(cnt, DONE); 6219 BIND(LOOP); 6220 { 6221 Register chr = rscratch1; 6222 ldrh(chr, Address(post(src, 2))); 6223 tst(chr, ascii ? 0xff80 : 0xff00); 6224 br(NE, DONE); 6225 strb(chr, Address(post(dst, 1))); 6226 subs(cnt, cnt, 1); 6227 br(GT, LOOP); 6228 } 6229 BIND(DONE); 6230 // Return index where we stopped. 6231 subw(res, len, cnt); 6232 } 6233 6234 // Inflate byte[] array to char[]. 6235 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6236 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6237 FloatRegister vtmp1, FloatRegister vtmp2, 6238 FloatRegister vtmp3, Register tmp4) { 6239 Label big, done, after_init, to_stub; 6240 6241 assert_different_registers(src, dst, len, tmp4, rscratch1); 6242 6243 fmovd(vtmp1, 0.0); 6244 lsrw(tmp4, len, 3); 6245 bind(after_init); 6246 cbnzw(tmp4, big); 6247 // Short string: less than 8 bytes. 6248 { 6249 Label loop, tiny; 6250 6251 cmpw(len, 4); 6252 br(LT, tiny); 6253 // Use SIMD to do 4 bytes. 
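    // zip1 with vtmp1 (zeroed by the fmovd above) interleaves each loaded
    // byte with a zero byte, so the 4 latin1 bytes become 4 little-endian
    // UTF-16 chars in a single instruction.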
6254 ldrs(vtmp2, post(src, 4)); 6255 zip1(vtmp3, T8B, vtmp2, vtmp1); 6256 subw(len, len, 4); 6257 strd(vtmp3, post(dst, 8)); 6258 6259 cbzw(len, done); 6260 6261 // Do the remaining bytes by steam. 6262 bind(loop); 6263 ldrb(tmp4, post(src, 1)); 6264 strh(tmp4, post(dst, 2)); 6265 subw(len, len, 1); 6266 6267 bind(tiny); 6268 cbnz(len, loop); 6269 6270 b(done); 6271 } 6272 6273 if (SoftwarePrefetchHintDistance >= 0) { 6274 bind(to_stub); 6275 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6276 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6277 address tpc = trampoline_call(stub); 6278 if (tpc == nullptr) { 6279 DEBUG_ONLY(reset_labels(big, done)); 6280 postcond(pc() == badAddress); 6281 return nullptr; 6282 } 6283 b(after_init); 6284 } 6285 6286 // Unpack the bytes 8 at a time. 6287 bind(big); 6288 { 6289 Label loop, around, loop_last, loop_start; 6290 6291 if (SoftwarePrefetchHintDistance >= 0) { 6292 const int large_loop_threshold = (64 + 16)/8; 6293 ldrd(vtmp2, post(src, 8)); 6294 andw(len, len, 7); 6295 cmp(tmp4, (u1)large_loop_threshold); 6296 br(GE, to_stub); 6297 b(loop_start); 6298 6299 bind(loop); 6300 ldrd(vtmp2, post(src, 8)); 6301 bind(loop_start); 6302 subs(tmp4, tmp4, 1); 6303 br(EQ, loop_last); 6304 zip1(vtmp2, T16B, vtmp2, vtmp1); 6305 ldrd(vtmp3, post(src, 8)); 6306 st1(vtmp2, T8H, post(dst, 16)); 6307 subs(tmp4, tmp4, 1); 6308 zip1(vtmp3, T16B, vtmp3, vtmp1); 6309 st1(vtmp3, T8H, post(dst, 16)); 6310 br(NE, loop); 6311 b(around); 6312 bind(loop_last); 6313 zip1(vtmp2, T16B, vtmp2, vtmp1); 6314 st1(vtmp2, T8H, post(dst, 16)); 6315 bind(around); 6316 cbz(len, done); 6317 } else { 6318 andw(len, len, 7); 6319 bind(loop); 6320 ldrd(vtmp2, post(src, 8)); 6321 sub(tmp4, tmp4, 1); 6322 zip1(vtmp3, T16B, vtmp2, vtmp1); 6323 st1(vtmp3, T8H, post(dst, 16)); 6324 cbnz(tmp4, loop); 6325 } 6326 } 6327 6328 // Do the tail of up to 8 bytes. 6329 add(src, src, len); 6330 ldrd(vtmp3, Address(src, -8)); 6331 add(dst, dst, len, ext::uxtw, 1); 6332 zip1(vtmp3, T16B, vtmp3, vtmp1); 6333 strq(vtmp3, Address(dst, -16)); 6334 6335 bind(done); 6336 postcond(pc() != badAddress); 6337 return pc(); 6338 } 6339 6340 // Compress char[] array to byte[]. 6341 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6342 // Return the array length if every element in the array can be encoded, 6343 // otherwise, the index of the first non-latin1 (> 0xff) character. 6344 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6345 Register res, 6346 FloatRegister tmp0, FloatRegister tmp1, 6347 FloatRegister tmp2, FloatRegister tmp3, 6348 FloatRegister tmp4, FloatRegister tmp5) { 6349 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6350 } 6351 6352 // java.lang.Math.round(double a) 6353 // Returns the closest long to the argument, with ties rounding to 6354 // positive infinity. This requires some fiddling for corner 6355 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6356 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6357 FloatRegister ftmp) { 6358 Label DONE; 6359 BLOCK_COMMENT("java_round_double: { "); 6360 fmovd(rscratch1, src); 6361 // Use RoundToNearestTiesAway unless src is small and negative.
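  // Ties-away matches Math.round for non-negative inputs, but differs on
  // negative halfway cases: fcvtas(-2.5) == -3 while Math.round(-2.5) == -2.
  // Hence the fixup below computes floor(src + 0.5) for small negative
  // inputs (e.g. floor(-2.5 + 0.5) == -2); for abs(src) >= 0x1.0p52 a
  // double is already integral, so the fcvtas result stands.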
6362 fcvtasd(dst, src); 6363 // Test if src >= 0 || abs(src) >= 0x1.0p52 6364 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6365 mov(rscratch2, julong_cast(0x1.0p52)); 6366 cmp(rscratch1, rscratch2); 6367 br(HS, DONE); { 6368 // src < 0 && abs(src) < 0x1.0p52 6369 // src may have a fractional part, so add 0.5 6370 fmovd(ftmp, 0.5); 6371 faddd(ftmp, src, ftmp); 6372 // Convert double to jlong, use RoundTowardsNegative 6373 fcvtmsd(dst, ftmp); 6374 } 6375 bind(DONE); 6376 BLOCK_COMMENT("} java_round_double"); 6377 } 6378 6379 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6380 FloatRegister ftmp) { 6381 Label DONE; 6382 BLOCK_COMMENT("java_round_float: { "); 6383 fmovs(rscratch1, src); 6384 // Use RoundToNearestTiesAway unless src is small and negative. 6385 fcvtassw(dst, src); 6386 // Test if src >= 0 || abs(src) >= 0x1.0p23 6387 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6388 mov(rscratch2, jint_cast(0x1.0p23f)); 6389 cmp(rscratch1, rscratch2); 6390 br(HS, DONE); { 6391 // src < 0 && |src| < 0x1.0p23 6392 // src may have a fractional part, so add 0.5 6393 fmovs(ftmp, 0.5f); 6394 fadds(ftmp, src, ftmp); 6395 // Convert float to jint, use RoundTowardsNegative 6396 fcvtmssw(dst, ftmp); 6397 } 6398 bind(DONE); 6399 BLOCK_COMMENT("} java_round_float"); 6400 } 6401 6402 // get_thread() can be called anywhere inside generated code so we 6403 // need to save whatever non-callee-saved context might get clobbered 6404 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6405 // the call setup code. 6406 // 6407 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6408 // On other systems, the helper is a usual C function. 6409 // 6410 void MacroAssembler::get_thread(Register dst) { 6411 RegSet saved_regs = 6412 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6413 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6414 6415 protect_return_address(); 6416 push(saved_regs, sp); 6417 6418 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6419 blr(lr); 6420 if (dst != c_rarg0) { 6421 mov(dst, c_rarg0); 6422 } 6423 6424 pop(saved_regs, sp); 6425 authenticate_return_address(); 6426 } 6427 6428 void MacroAssembler::cache_wb(Address line) { 6429 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6430 assert(line.index() == noreg, "index should be noreg"); 6431 assert(line.offset() == 0, "offset should be 0"); 6432 // would like to assert this 6433 // assert(line._ext.shift == 0, "shift should be zero"); 6434 if (VM_Version::supports_dcpop()) { 6435 // writeback using clean virtual address to point of persistence 6436 dc(Assembler::CVAP, line.base()); 6437 } else { 6438 // no need to generate anything as Unsafe.writebackMemory should 6439 // never invoke this stub 6440 } 6441 } 6442 6443 void MacroAssembler::cache_wbsync(bool is_pre) { 6444 // we only need a barrier post sync 6445 if (!is_pre) { 6446 membar(Assembler::AnyAny); 6447 } 6448 } 6449 6450 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6451 // Make sure that native code does not change SVE vector length.
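// sve_inc(tmp, B) adds the current number of byte lanes to tmp, so with
// tmp starting at zero it yields the live vector length in bytes, which
// is then compared against the length captured at startup.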
6452 if (!UseSVE) return; 6453 Label verify_ok; 6454 movw(tmp, zr); 6455 sve_inc(tmp, B); 6456 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 6457 br(EQ, verify_ok); 6458 stop("Error: SVE vector length has changed since jvm startup"); 6459 bind(verify_ok); 6460 } 6461 6462 void MacroAssembler::verify_ptrue() { 6463 Label verify_ok; 6464 if (!UseSVE) { 6465 return; 6466 } 6467 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 6468 sve_dec(rscratch1, B); 6469 cbz(rscratch1, verify_ok); 6470 stop("Error: the preserved predicate register (p7) elements are not all true"); 6471 bind(verify_ok); 6472 } 6473 6474 void MacroAssembler::safepoint_isb() { 6475 isb(); 6476 #ifndef PRODUCT 6477 if (VerifyCrossModifyFence) { 6478 // Clear the thread state. 6479 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6480 } 6481 #endif 6482 } 6483 6484 #ifndef PRODUCT 6485 void MacroAssembler::verify_cross_modify_fence_not_required() { 6486 if (VerifyCrossModifyFence) { 6487 // Check if thread needs a cross modify fence. 6488 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6489 Label fence_not_required; 6490 cbz(rscratch1, fence_not_required); 6491 // If it does then fail. 6492 lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)); 6493 mov(c_rarg0, rthread); 6494 blr(rscratch1); 6495 bind(fence_not_required); 6496 } 6497 } 6498 #endif 6499 6500 void MacroAssembler::spin_wait() { 6501 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 6502 switch (VM_Version::spin_wait_desc().inst()) { 6503 case SpinWait::NOP: 6504 nop(); 6505 break; 6506 case SpinWait::ISB: 6507 isb(); 6508 break; 6509 case SpinWait::YIELD: 6510 yield(); 6511 break; 6512 default: 6513 ShouldNotReachHere(); 6514 } 6515 } 6516 } 6517 6518 // Stack frame creation/removal 6519 6520 void MacroAssembler::enter(bool strip_ret_addr) { 6521 if (strip_ret_addr) { 6522 // Addresses can only be signed once. If there are multiple nested frames being created 6523 // in the same function, then the return address needs stripping first. 6524 strip_return_address(); 6525 } 6526 protect_return_address(); 6527 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6528 mov(rfp, sp); 6529 } 6530 6531 void MacroAssembler::leave() { 6532 mov(sp, rfp); 6533 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 6534 authenticate_return_address(); 6535 } 6536 6537 // ROP Protection 6538 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/ 6539 // destroying stack frames or whenever directly loading/storing the LR to memory. 6540 // If ROP protection is not set then these functions are no-ops. 6541 // For more details on PAC see pauth_aarch64.hpp. 6542 6543 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory. 6544 // Uses value zero as the modifier. 6545 // 6546 void MacroAssembler::protect_return_address() { 6547 if (VM_Version::use_rop_protection()) { 6548 check_return_address(); 6549 paciaz(); 6550 } 6551 } 6552 6553 // Sign the return address held in the given register. Use before updating the LR in the existing 6554 // stack frame for the current function. 6555 // Uses value zero as the modifier. 6556 // 6557 void MacroAssembler::protect_return_address(Register return_reg) { 6558 if (VM_Version::use_rop_protection()) { 6559 check_return_address(return_reg); 6560 paciza(return_reg); 6561 } 6562 } 6563 6564 // Authenticate the LR.
Use before function return, after restoring FP and loading LR from memory. 6565 // Uses value zero as the modifier. 6566 // 6567 void MacroAssembler::authenticate_return_address() { 6568 if (VM_Version::use_rop_protection()) { 6569 autiaz(); 6570 check_return_address(); 6571 } 6572 } 6573 6574 // Authenticate the return address held in the given register. Use before updating the LR in the 6575 // existing stack frame for the current function. 6576 // Uses value zero as the modifier. 6577 // 6578 void MacroAssembler::authenticate_return_address(Register return_reg) { 6579 if (VM_Version::use_rop_protection()) { 6580 autiza(return_reg); 6581 check_return_address(return_reg); 6582 } 6583 } 6584 6585 // Strip any PAC data from LR without performing any authentication. Use with caution - only if 6586 // there is no guaranteed way of authenticating the LR. 6587 // 6588 void MacroAssembler::strip_return_address() { 6589 if (VM_Version::use_rop_protection()) { 6590 xpaclri(); 6591 } 6592 } 6593 6594 #ifndef PRODUCT 6595 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only 6596 // occur when the pointer is used - i.e. when the program returns to the invalid LR. At this point 6597 // it is difficult to debug back to the callee function. 6598 // This function simply loads from the address in the given register. 6599 // Use directly after authentication to catch authentication failures. 6600 // Also use before signing to check that the pointer is valid and hasn't already been signed. 6601 // 6602 void MacroAssembler::check_return_address(Register return_reg) { 6603 if (VM_Version::use_rop_protection()) { 6604 ldr(zr, Address(return_reg)); 6605 } 6606 } 6607 #endif 6608 6609 // The java_calling_convention describes stack locations as ideal slots on 6610 // a frame with no abi restrictions. Since we must observe abi restrictions 6611 // (like the placement of the register window) the slots must be biased by 6612 // the following value. 6613 static int reg2offset_in(VMReg r) { 6614 // Account for saved rfp and lr 6615 // This should really be in_preserve_stack_slots 6616 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; 6617 } 6618 6619 static int reg2offset_out(VMReg r) { 6620 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 6621 } 6622 6623 // On 64-bit we store integer-like items to the stack as 64-bit items 6624 // (AArch64 ABI) even though Java would only store 32 bits for a 6625 // parameter. On 32-bit it would simply be 32 bits, so this routine 6626 // does 32->32 on 32-bit and 32->64 on 64-bit. 6627 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) { 6628 if (src.first()->is_stack()) { 6629 if (dst.first()->is_stack()) { 6630 // stack to stack 6631 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6632 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6633 } else { 6634 // stack to reg 6635 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6636 } 6637 } else if (dst.first()->is_stack()) { 6638 // reg to stack 6639 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6640 } else { 6641 if (dst.first() != src.first()) { 6642 sxtw(dst.first()->as_Register(), src.first()->as_Register()); 6643 } 6644 } 6645 } 6646 6647 // An oop arg.
Must pass a handle, not the oop itself 6648 void MacroAssembler::object_move( 6649 OopMap* map, 6650 int oop_handle_offset, 6651 int framesize_in_slots, 6652 VMRegPair src, 6653 VMRegPair dst, 6654 bool is_receiver, 6655 int* receiver_offset) { 6656 6657 // We must pass a handle. First figure out the location we use as a handle 6658 6659 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register(); 6660 6661 // See if the oop is null; if it is, we need no handle 6662 6663 if (src.first()->is_stack()) { 6664 6665 // Oop is already on the stack as an argument 6666 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 6667 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 6668 if (is_receiver) { 6669 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 6670 } 6671 6672 ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 6673 lea(rHandle, Address(rfp, reg2offset_in(src.first()))); 6674 // conditionally move a null 6675 cmp(rscratch1, zr); 6676 csel(rHandle, zr, rHandle, Assembler::EQ); 6677 } else { 6678 6679 // The oop is in a register; we must store it to the space we reserve 6680 // on the stack for oop handles, and pass a handle if the oop is non-null 6681 6682 const Register rOop = src.first()->as_Register(); 6683 int oop_slot; 6684 if (rOop == j_rarg0) 6685 oop_slot = 0; 6686 else if (rOop == j_rarg1) 6687 oop_slot = 1; 6688 else if (rOop == j_rarg2) 6689 oop_slot = 2; 6690 else if (rOop == j_rarg3) 6691 oop_slot = 3; 6692 else if (rOop == j_rarg4) 6693 oop_slot = 4; 6694 else if (rOop == j_rarg5) 6695 oop_slot = 5; 6696 else if (rOop == j_rarg6) 6697 oop_slot = 6; 6698 else { 6699 assert(rOop == j_rarg7, "wrong register"); 6700 oop_slot = 7; 6701 } 6702 6703 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 6704 int offset = oop_slot*VMRegImpl::stack_slot_size; 6705 6706 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 6707 // Store oop in handle area, may be null 6708 str(rOop, Address(sp, offset)); 6709 if (is_receiver) { 6710 *receiver_offset = offset; 6711 } 6712 6713 cmp(rOop, zr); 6714 lea(rHandle, Address(sp, offset)); 6715 // conditionally move a null 6716 csel(rHandle, zr, rHandle, Assembler::EQ); 6717 } 6718 6719 // If the arg is on the stack then place it, otherwise it is already in the correct reg.
6720 if (dst.first()->is_stack()) { 6721 str(rHandle, Address(sp, reg2offset_out(dst.first()))); 6722 } 6723 } 6724 6725 // A float arg may have to be moved via an integer register or the stack 6726 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) { 6727 if (src.first()->is_stack()) { 6728 if (dst.first()->is_stack()) { 6729 ldrw(tmp, Address(rfp, reg2offset_in(src.first()))); 6730 strw(tmp, Address(sp, reg2offset_out(dst.first()))); 6731 } else { 6732 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6733 } 6734 } else if (src.first() != dst.first()) { 6735 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6736 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6737 else 6738 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6739 } 6740 } 6741 6742 // A long move 6743 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) { 6744 if (src.first()->is_stack()) { 6745 if (dst.first()->is_stack()) { 6746 // stack to stack 6747 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6748 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6749 } else { 6750 // stack to reg 6751 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 6752 } 6753 } else if (dst.first()->is_stack()) { 6754 // reg to stack 6755 // Do we really have to sign extend??? 6756 // __ movslq(src.first()->as_Register(), src.first()->as_Register()); 6757 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 6758 } else { 6759 if (dst.first() != src.first()) { 6760 mov(dst.first()->as_Register(), src.first()->as_Register()); 6761 } 6762 } 6763 } 6764 6765 6766 // A double move 6767 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) { 6768 if (src.first()->is_stack()) { 6769 if (dst.first()->is_stack()) { 6770 ldr(tmp, Address(rfp, reg2offset_in(src.first()))); 6771 str(tmp, Address(sp, reg2offset_out(dst.first()))); 6772 } else { 6773 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 6774 } 6775 } else if (src.first() != dst.first()) { 6776 if (src.is_single_phys_reg() && dst.is_single_phys_reg()) 6777 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 6778 else 6779 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 6780 } 6781 } 6782 6783 // Implements lightweight-locking. 6784 // 6785 // - obj: the object to be locked 6786 // - t1, t2, t3: temporary registers, will be destroyed 6787 // - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding). 6788 void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) { 6789 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); 6790 assert_different_registers(obj, t1, t2, t3, rscratch1); 6791 6792 Label push; 6793 const Register top = t1; 6794 const Register mark = t2; 6795 const Register t = t3; 6796 6797 // Preload the markWord. It is important that this is the first 6798 // instruction emitted as it is part of C1's null check semantics. 6799 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); 6800 6801 // Check if the lock-stack is full. 6802 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6803 cmpw(top, (unsigned)LockStack::end_offset()); 6804 br(Assembler::GE, slow); 6805 6806 // Check for recursion.
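  // The entry just below top is the most recently pushed oop; if it is
  // this object the lock is already owned by this thread, and we only
  // need to push another entry (see the pseudocode sketch after
  // lightweight_unlock below).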
6807 subw(t, top, oopSize); 6808 ldr(t, Address(rthread, t)); 6809 cmp(obj, t); 6810 br(Assembler::EQ, push); 6811 6812 // Check header for monitor (0b10). 6813 tst(mark, markWord::monitor_value); 6814 br(Assembler::NE, slow); 6815 6816 // Try to lock. Transition lock bits 0b01 => 0b00 6817 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); 6818 orr(mark, mark, markWord::unlocked_value); 6819 eor(t, mark, markWord::unlocked_value); 6820 cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword, 6821 /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg); 6822 br(Assembler::NE, slow); 6823 6824 bind(push); 6825 // After successful lock, push object on lock-stack. 6826 str(obj, Address(rthread, top)); 6827 addw(top, top, oopSize); 6828 strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6829 } 6830 6831 // Implements lightweight-unlocking. 6832 // 6833 // - obj: the object to be unlocked 6834 // - t1, t2, t3: temporary registers 6835 // - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding). 6836 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) { 6837 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); 6838 // cmpxchg clobbers rscratch1. 6839 assert_different_registers(obj, t1, t2, t3, rscratch1); 6840 6841 #ifdef ASSERT 6842 { 6843 // Check for lock-stack underflow. 6844 Label stack_ok; 6845 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset())); 6846 cmpw(t1, (unsigned)LockStack::start_offset()); 6847 br(Assembler::GE, stack_ok); 6848 STOP("Lock-stack underflow"); 6849 bind(stack_ok); 6850 } 6851 #endif 6852 6853 Label unlocked, push_and_slow; 6854 const Register top = t1; 6855 const Register mark = t2; 6856 const Register t = t3; 6857 6858 // Check if obj is top of lock-stack. 6859 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6860 subw(top, top, oopSize); 6861 ldr(t, Address(rthread, top)); 6862 cmp(obj, t); 6863 br(Assembler::NE, slow); 6864 6865 // Pop lock-stack. 6866 DEBUG_ONLY(str(zr, Address(rthread, top));) 6867 strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6868 6869 // Check if recursive. 6870 subw(t, top, oopSize); 6871 ldr(t, Address(rthread, t)); 6872 cmp(obj, t); 6873 br(Assembler::EQ, unlocked); 6874 6875 // Not recursive. Check header for monitor (0b10). 6876 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); 6877 tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow); 6878 6879 #ifdef ASSERT 6880 // Check header not unlocked (0b01). 6881 Label not_unlocked; 6882 tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked); 6883 stop("lightweight_unlock already unlocked"); 6884 bind(not_unlocked); 6885 #endif 6886 6887 // Try to unlock. Transition lock bits 0b00 => 0b01 6888 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); 6889 orr(t, mark, markWord::unlocked_value); 6890 cmpxchg(obj, mark, t, Assembler::xword, 6891 /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); 6892 br(Assembler::EQ, unlocked); 6893 6894 bind(push_and_slow); 6895 // Restore lock-stack and handle the unlock in runtime. 6896 DEBUG_ONLY(str(obj, Address(rthread, top));) 6897 addw(top, top, oopSize); 6898 strw(top, Address(rthread, JavaThread::lock_stack_top_offset())); 6899 b(slow); 6900 6901 bind(unlocked); 6902 }
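// A condensed pseudocode sketch of the two fast paths above
// (illustrative only; the protocol is what the emitted code does):
//
//   lock(obj):
//     if lock-stack full                 -> slow
//     if lock-stack top == obj           -> recursive: just push obj
//     if mark has monitor bits (0b10)    -> slow
//     CAS mark bits 0b01 -> 0b00; on success push obj, else -> slow
//
//   unlock(obj):
//     if lock-stack top != obj           -> slow
//     pop obj; if new top == obj         -> recursive: done
//     if mark has monitor bits (0b10)    -> re-push obj and -> slow
//     CAS mark bits 0b00 -> 0b01; on failure re-push obj and -> slow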