/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#if INCLUDE_CDS
#include "code/SCCache.hpp"
#endif
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//        other  Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                 strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
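// For illustration (not an exhaustive decode): an unconditional branch
// b/bl has insn[31:26] == x00101, so insn[30:25] is 00101x and run()
// below dispatches it to unconditionalBranch(); an adrp has
// insn[31:24] == 1xx10000, so insn[30:25] is xx1000 and it is handled
// by the adr/adrp case, which then inspects the following
// instruction(s) to pick the matching inner action.
//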
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
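// For example (illustrative only): for the pair "adrp x8, sym; ldr x9,
// [x8, #16]" the second instruction is a 64-bit LDR whose imm12 field
// holds 16 >> 3 == 2, so this returns byte_offset == 2 << 3 == 16.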
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
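    // The 21-bit immediate is immhi:immlo; for adr it is a plain byte
    // offset from this instruction (no page shift, unlike adrp below).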
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
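  // For instance (illustrative values): for a narrow klass 0x00123456 the
  // high half 0x0012 is patched into the imm16 field of the first (movz)
  // instruction and the low half 0x3456 into the following movk.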
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp and sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
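// (These values live in the JavaFrameAnchor embedded in the JavaThread,
// which is what the last_Java_sp/fp/pc offsets used below address.)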
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (SCCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

static bool is_preemptable(address entry_point) {
  return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  if (is_preemptable(entry_point)) {
    // skip setting last_pc since we already set it to desired value.
    set_last_Java_frame(last_java_sp, rfp, noreg, rscratch1);
  } else {
    set_last_Java_frame(last_java_sp, rfp, l, rscratch1);
  }

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (SCCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * (7 + extra_instructions);
  } else {
    return NativeInstruction::instruction_size * (5 + extra_instructions);
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, receiver);
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
      - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   Register super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
  bool must_load_sco = !super_check_offset->is_valid();
  if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = temp_reg;
  }

  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype
  br(Assembler::EQ, *L_success);

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
  if (L_failure == &L_fallthrough) {
    cbz(rscratch1, *L_slow_path);
  } else {
    cbnz(rscratch1, *L_failure);
    final_jmp(*L_slow_path);
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
                                                          Register super_klass,
                                                          Register temp_reg,
                                                          Register temp2_reg,
                                                          Label* L_success,
                                                          Label* L_failure,
                                                          bool set_cond_codes) {
  // NB! Callers may assume that, when temp2_reg is a valid register,
  // this code sets it to a nonzero value.
1542 1543 assert_different_registers(sub_klass, super_klass, temp_reg); 1544 if (temp2_reg != noreg) 1545 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1546 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1547 1548 Label L_fallthrough; 1549 int label_nulls = 0; 1550 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1551 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1552 assert(label_nulls <= 1, "at most one null in the batch"); 1553 1554 // a couple of useful fields in sub_klass: 1555 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1556 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1557 Address secondary_supers_addr(sub_klass, ss_offset); 1558 Address super_cache_addr( sub_klass, sc_offset); 1559 1560 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1561 1562 // Do a linear scan of the secondary super-klass chain. 1563 // This code is rarely used, so simplicity is a virtue here. 1564 // The repne_scan instruction uses fixed registers, which we must spill. 1565 // Don't worry too much about pre-existing connections with the input regs. 1566 1567 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1568 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1569 1570 RegSet pushed_registers; 1571 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1572 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1573 1574 if (super_klass != r0) { 1575 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1576 } 1577 1578 push(pushed_registers, sp); 1579 1580 // Get super_klass value into r0 (even if it was in r5 or r2). 1581 if (super_klass != r0) { 1582 mov(r0, super_klass); 1583 } 1584 1585 #ifndef PRODUCT 1586 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1587 #endif //PRODUCT 1588 1589 // We will consult the secondary-super array. 1590 ldr(r5, secondary_supers_addr); 1591 // Load the array length. 1592 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1593 // Skip to start of data. 1594 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1595 1596 cmp(sp, zr); // Clear Z flag; SP is never zero 1597 // Scan R2 words at [R5] for an occurrence of R0. 1598 // Set NZ/Z based on last compare. 1599 repne_scan(r5, r0, r2, rscratch1); 1600 1601 // Unspill the temp. registers: 1602 pop(pushed_registers, sp); 1603 1604 br(Assembler::NE, *L_failure); 1605 1606 // Success. Cache the super we found and proceed in triumph. 1607 1608 if (UseSecondarySupersCache) { 1609 str(super_klass, super_cache_addr); 1610 } 1611 1612 if (L_success != &L_fallthrough) { 1613 b(*L_success); 1614 } 1615 1616 #undef IS_A_TEMP 1617 1618 bind(L_fallthrough); 1619 } 1620 1621 // If Register r is invalid, take the next register from 1622 // available_regs and add it to regs_to_push. 1623 Register MacroAssembler::allocate_if_noreg(Register r, 1624 RegSetIterator<Register> &available_regs, 1625 RegSet &regs_to_push) { 1626 if (!r->is_valid()) { 1627 r = *available_regs++; 1628 regs_to_push += r; 1629 } 1630 return r; 1631 } 1632 1633 // check_klass_subtype_slow_path_table() looks for super_klass in the 1634 // hash table belonging to sub_klass, branching to L_success or 1635 // L_failure as appropriate. This is essentially a shim which 1636 // allocates registers as necessary then calls 1637 // lookup_secondary_supers_table() to do the work.
Any of the temp 1638 // regs may be noreg, in which case this logic will choose some 1639 // registers and push and pop them from the stack. 1640 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass, 1641 Register super_klass, 1642 Register temp_reg, 1643 Register temp2_reg, 1644 Register temp3_reg, 1645 Register result_reg, 1646 FloatRegister vtemp, 1647 Label* L_success, 1648 Label* L_failure, 1649 bool set_cond_codes) { 1650 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg); 1651 1652 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1653 1654 Label L_fallthrough; 1655 int label_nulls = 0; 1656 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1657 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1658 assert(label_nulls <= 1, "at most one null in the batch"); 1659 1660 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1661 1662 RegSetIterator<Register> available_regs 1663 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin(); 1664 1665 RegSet pushed_regs; 1666 1667 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs); 1668 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs); 1669 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs); 1670 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs); 1671 1672 push(pushed_regs, sp); 1673 1674 lookup_secondary_supers_table_var(sub_klass, 1675 super_klass, 1676 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg, 1677 nullptr); 1678 cmp(result_reg, zr); 1679 1680 // Unspill the temp. registers: 1681 pop(pushed_regs, sp); 1682 1683 // NB! Callers may assume that, when set_cond_codes is true, this 1684 // code sets temp2_reg to a nonzero value. 1685 if (set_cond_codes) { 1686 mov(temp2_reg, 1); 1687 } 1688 1689 br(Assembler::NE, *L_failure); 1690 1691 if (L_success != &L_fallthrough) { 1692 b(*L_success); 1693 } 1694 1695 bind(L_fallthrough); 1696 } 1697 1698 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 1699 Register super_klass, 1700 Register temp_reg, 1701 Register temp2_reg, 1702 Label* L_success, 1703 Label* L_failure, 1704 bool set_cond_codes) { 1705 if (UseSecondarySupersTable) { 1706 check_klass_subtype_slow_path_table 1707 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg, 1708 /*vtemp*/fnoreg, 1709 L_success, L_failure, set_cond_codes); 1710 } else { 1711 check_klass_subtype_slow_path_linear 1712 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes); 1713 } 1714 } 1715 1716 1717 // Ensure that the inline code and the stub are using the same registers.
1718 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1719 do { \ 1720 assert(r_super_klass == r0 && \ 1721 r_array_base == r1 && \ 1722 r_array_length == r2 && \ 1723 (r_array_index == r3 || r_array_index == noreg) && \ 1724 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1725 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1726 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1727 } while(0) 1728 1729 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass, 1730 Register r_super_klass, 1731 Register temp1, 1732 Register temp2, 1733 Register temp3, 1734 FloatRegister vtemp, 1735 Register result, 1736 u1 super_klass_slot, 1737 bool stub_is_near) { 1738 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1739 1740 Label L_fallthrough; 1741 1742 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1743 1744 const Register 1745 r_array_base = temp1, // r1 1746 r_array_length = temp2, // r2 1747 r_array_index = temp3, // r3 1748 r_bitmap = rscratch2; 1749 1750 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1751 1752 u1 bit = super_klass_slot; 1753 1754 // Make sure that result is nonzero if the TBZ below misses. 1755 mov(result, 1); 1756 1757 // We're going to need the bitmap in a vector reg and in a core reg, 1758 // so load both now. 1759 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1760 if (bit != 0) { 1761 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1762 } 1763 // First check the bitmap to see if super_klass might be present. If 1764 // the bit is zero, we are certain that super_klass is not one of 1765 // the secondary supers. 1766 tbz(r_bitmap, bit, L_fallthrough); 1767 1768 // Get the first array index that can contain super_klass into r_array_index. 1769 if (bit != 0) { 1770 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1771 cnt(vtemp, T8B, vtemp); 1772 addv(vtemp, T8B, vtemp); 1773 fmovd(r_array_index, vtemp); 1774 } else { 1775 mov(r_array_index, (u1)1); 1776 } 1777 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1778 1779 // We will consult the secondary-super array. 1780 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1781 1782 // The value i in r_array_index is >= 1, so even though r_array_base 1783 // points to the length, we don't need to adjust it to point to the 1784 // data. 1785 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1786 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1787 1788 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1789 eor(result, result, r_super_klass); 1790 cbz(result, L_fallthrough); // Found a match 1791 1792 // Is there another entry to check? Consult the bitmap. 1793 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1794 1795 // Linear probe. 1796 if (bit != 0) { 1797 ror(r_bitmap, r_bitmap, bit); 1798 } 1799 1800 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1801 // The next slot to be inspected, by the stub we're about to call, 1802 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1803 // have been checked. 
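  // A worked example of the indexing above (the values are hypothetical):
  // suppose super_klass hashes to slot 5 and the bitmap has bits 1, 2, 5
  // and 6 set (0b...01100110). The tbz on bit 5 falls through, the popcount
  // of bits 0..5 gives r_array_index == 3, and the probe above therefore
  // read secondary_supers[2] -- exactly where an entry with home slot 5
  // lands when slots 1 and 2 are also occupied. If that entry did not
  // match, the slow-path stub called below continues probing from
  // secondary_supers[3], with the bitmap rotated so that the home slot's
  // bit is bit 0.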
1804 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()); 1805 if (stub_is_near) { 1806 bl(stub); 1807 } else { 1808 address call = trampoline_call(stub); 1809 if (call == nullptr) { 1810 return false; // trampoline allocation failed 1811 } 1812 } 1813 1814 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1815 1816 bind(L_fallthrough); 1817 1818 if (VerifySecondarySupers) { 1819 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1820 temp1, temp2, result); // r1, r2, r5 1821 } 1822 return true; 1823 } 1824 1825 // At runtime, return 0 in result if r_super_klass is a superclass of 1826 // r_sub_klass, otherwise return nonzero. Use this version of 1827 // lookup_secondary_supers_table() if you don't know ahead of time 1828 // which superclass will be searched for. Used by interpreter and 1829 // runtime stubs. It is larger and has somewhat greater latency than 1830 // the version above, which takes a constant super_klass_slot. 1831 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 1832 Register r_super_klass, 1833 Register temp1, 1834 Register temp2, 1835 Register temp3, 1836 FloatRegister vtemp, 1837 Register result, 1838 Label *L_success) { 1839 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1840 1841 Label L_fallthrough; 1842 1843 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1844 1845 const Register 1846 r_array_index = temp3, 1847 slot = rscratch1, 1848 r_bitmap = rscratch2; 1849 1850 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 1851 1852 // Make sure that result is nonzero if the test below misses. 1853 mov(result, 1); 1854 1855 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1856 1857 // First check the bitmap to see if super_klass might be present. If 1858 // the bit is zero, we are certain that super_klass is not one of 1859 // the secondary supers. 1860 1861 // This next instruction is equivalent to: 1862 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); 1863 // sub(temp2, tmp_reg, slot); 1864 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); 1865 lslv(temp2, r_bitmap, temp2); 1866 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough); 1867 1868 bool must_save_v0 = (vtemp == fnoreg); 1869 if (must_save_v0) { 1870 // temp1 and result are free, so use them to preserve vtemp 1871 vtemp = v0; 1872 mov(temp1, vtemp, D, 0); 1873 mov(result, vtemp, D, 1); 1874 } 1875 1876 // Get the first array index that can contain super_klass into r_array_index. 1877 mov(vtemp, D, 0, temp2); 1878 cnt(vtemp, T8B, vtemp); 1879 addv(vtemp, T8B, vtemp); 1880 mov(r_array_index, vtemp, D, 0); 1881 1882 if (must_save_v0) { 1883 mov(vtemp, D, 0, temp1 ); 1884 mov(vtemp, D, 1, result); 1885 } 1886 1887 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1888 1889 const Register 1890 r_array_base = temp1, 1891 r_array_length = temp2; 1892 1893 // The value i in r_array_index is >= 1, so even though r_array_base 1894 // points to the length, we don't need to adjust it to point to the 1895 // data. 1896 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1897 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1898 1899 // We will consult the secondary-super array. 
1900 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1901 1902 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1903 eor(result, result, r_super_klass); 1904 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match 1905 1906 // Is there another entry to check? Consult the bitmap. 1907 rorv(r_bitmap, r_bitmap, slot); 1908 // rol(r_bitmap, r_bitmap, 1); 1909 tbz(r_bitmap, 1, L_fallthrough); 1910 1911 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1912 // The next slot to be inspected, by the logic we're about to call, 1913 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1914 // have been checked. 1915 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index, 1916 r_bitmap, r_array_length, result, /*is_stub*/false); 1917 1918 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1919 1920 bind(L_fallthrough); 1921 1922 if (VerifySecondarySupers) { 1923 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1924 temp1, temp2, result); // r1, r2, r5 1925 } 1926 1927 if (L_success) { 1928 cbz(result, *L_success); 1929 } 1930 } 1931 1932 // Called by code generated by check_klass_subtype_slow_path 1933 // above. This is called when there is a collision in the hashed 1934 // lookup in the secondary supers array. 1935 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 1936 Register r_array_base, 1937 Register r_array_index, 1938 Register r_bitmap, 1939 Register temp1, 1940 Register result, 1941 bool is_stub) { 1942 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1); 1943 1944 const Register 1945 r_array_length = temp1, 1946 r_sub_klass = noreg; // unused 1947 1948 if (is_stub) { 1949 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1950 } 1951 1952 Label L_fallthrough, L_huge; 1953 1954 // Load the array length. 1955 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1956 // And adjust the array base to point to the data. 1957 // NB! Effectively increments current slot index by 1. 1958 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 1959 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1960 1961 // The bitmap is full to bursting. 1962 // Implicit invariant: BITMAP_FULL implies (length > 0) 1963 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); 1964 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2)); 1965 br(GT, L_huge); 1966 1967 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 1968 // current slot (at secondary_supers[r_array_index]) has not yet 1969 // been inspected, and r_array_index may be out of bounds if we 1970 // wrapped around the end of the array. 1971 1972 { // This is conventional linear probing, but instead of terminating 1973 // when a null entry is found in the table, we maintain a bitmap 1974 // in which a 0 indicates missing entries. 1975 // As long as the bitmap is not completely full, 1976 // array_length == popcount(bitmap). The array_length check above 1977 // guarantees there are 0s in the bitmap, so the loop eventually 1978 // terminates. 1979 Label L_loop; 1980 bind(L_loop); 1981 1982 // Check for wraparound. 
1983 cmp(r_array_index, r_array_length); 1984 csel(r_array_index, zr, r_array_index, GE); 1985 1986 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1987 eor(result, rscratch1, r_super_klass); 1988 cbz(result, L_fallthrough); 1989 1990 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero 1991 1992 ror(r_bitmap, r_bitmap, 1); 1993 add(r_array_index, r_array_index, 1); 1994 b(L_loop); 1995 } 1996 1997 { // Degenerate case: more than 64 secondary supers. 1998 // FIXME: We could do something smarter here, maybe a vectorized 1999 // comparison or a binary search, but is that worth any added 2000 // complexity? 2001 bind(L_huge); 2002 cmp(sp, zr); // Clear Z flag; SP is never zero 2003 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1); 2004 cset(result, NE); // result == 0 iff we got a match. 2005 } 2006 2007 bind(L_fallthrough); 2008 } 2009 2010 // Make sure that the hashed lookup and a linear scan agree. 2011 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 2012 Register r_super_klass, 2013 Register temp1, 2014 Register temp2, 2015 Register result) { 2016 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 2017 2018 const Register 2019 r_array_base = temp1, 2020 r_array_length = temp2, 2021 r_array_index = noreg, // unused 2022 r_bitmap = noreg; // unused 2023 2024 BLOCK_COMMENT("verify_secondary_supers_table {"); 2025 2026 // We will consult the secondary-super array. 2027 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 2028 2029 // Load the array length. 2030 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 2031 // And adjust the array base to point to the data. 2032 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 2033 2034 cmp(sp, zr); // Clear Z flag; SP is never zero 2035 // Scan R2 words at [R5] for an occurrence of R0. 2036 // Set NZ/Z based on last compare. 2037 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 2038 // rscratch1 == 0 iff we got a match. 
2039 cset(rscratch1, NE); 2040 2041 Label passed; 2042 cmp(result, zr); 2043 cset(result, NE); // normalize result to 0/1 for comparison 2044 2045 cmp(rscratch1, result); 2046 br(EQ, passed); 2047 { 2048 mov(r0, r_super_klass); // r0 <- r0 2049 mov(r1, r_sub_klass); // r1 <- r4 2050 mov(r2, /*expected*/rscratch1); // r2 <- r8 2051 mov(r3, result); // r3 <- r5 2052 mov(r4, (address)("mismatch")); // r4 <- const 2053 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 2054 should_not_reach_here(); 2055 } 2056 bind(passed); 2057 2058 BLOCK_COMMENT("} verify_secondary_supers_table"); 2059 } 2060 2061 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 2062 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 2063 assert_different_registers(klass, rthread, scratch); 2064 2065 Label L_fallthrough, L_tmp; 2066 if (L_fast_path == nullptr) { 2067 L_fast_path = &L_fallthrough; 2068 } else if (L_slow_path == nullptr) { 2069 L_slow_path = &L_fallthrough; 2070 } 2071 // Fast path check: class is fully initialized 2072 lea(scratch, Address(klass, InstanceKlass::init_state_offset())); 2073 ldarb(scratch, scratch); 2074 subs(zr, scratch, InstanceKlass::fully_initialized); 2075 br(Assembler::EQ, *L_fast_path); 2076 2077 // Fast path check: current thread is initializer thread 2078 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 2079 cmp(rthread, scratch); 2080 2081 if (L_slow_path == &L_fallthrough) { 2082 br(Assembler::EQ, *L_fast_path); 2083 bind(*L_slow_path); 2084 } else if (L_fast_path == &L_fallthrough) { 2085 br(Assembler::NE, *L_slow_path); 2086 bind(*L_fast_path); 2087 } else { 2088 Unimplemented(); 2089 } 2090 } 2091 2092 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 2093 if (!VerifyOops) return; 2094 2095 // Pass register number to verify_oop_subroutine 2096 const char* b = nullptr; 2097 { 2098 ResourceMark rm; 2099 stringStream ss; 2100 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 2101 b = code_string(ss.as_string()); 2102 } 2103 BLOCK_COMMENT("verify_oop {"); 2104 2105 strip_return_address(); // This might happen within a stack frame. 2106 protect_return_address(); 2107 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 2108 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 2109 2110 mov(r0, reg); 2111 movptr(rscratch1, (uintptr_t)(address)b); 2112 2113 // call indirectly to solve generation ordering problem 2114 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 2115 ldr(rscratch2, Address(rscratch2)); 2116 blr(rscratch2); 2117 2118 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 2119 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 2120 authenticate_return_address(); 2121 2122 BLOCK_COMMENT("} verify_oop"); 2123 } 2124 2125 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 2126 if (!VerifyOops) return; 2127 2128 const char* b = nullptr; 2129 { 2130 ResourceMark rm; 2131 stringStream ss; 2132 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 2133 b = code_string(ss.as_string()); 2134 } 2135 BLOCK_COMMENT("verify_oop_addr {"); 2136 2137 strip_return_address(); // This might happen within a stack frame. 
2138 protect_return_address(); 2139 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 2140 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 2141 2142 // addr may contain sp so we will have to adjust it based on the 2143 // pushes that we just did. 2144 if (addr.uses(sp)) { 2145 lea(r0, addr); 2146 ldr(r0, Address(r0, 4 * wordSize)); 2147 } else { 2148 ldr(r0, addr); 2149 } 2150 movptr(rscratch1, (uintptr_t)(address)b); 2151 2152 // call indirectly to solve generation ordering problem 2153 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 2154 ldr(rscratch2, Address(rscratch2)); 2155 blr(rscratch2); 2156 2157 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 2158 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 2159 authenticate_return_address(); 2160 2161 BLOCK_COMMENT("} verify_oop_addr"); 2162 } 2163 2164 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2165 int extra_slot_offset) { 2166 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2167 int stackElementSize = Interpreter::stackElementSize; 2168 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 2169 #ifdef ASSERT 2170 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 2171 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 2172 #endif 2173 if (arg_slot.is_constant()) { 2174 return Address(esp, arg_slot.as_constant() * stackElementSize 2175 + offset); 2176 } else { 2177 add(rscratch1, esp, arg_slot.as_register(), 2178 ext::uxtx, exact_log2(stackElementSize)); 2179 return Address(rscratch1, offset); 2180 } 2181 } 2182 2183 void MacroAssembler::call_VM_leaf_base(address entry_point, 2184 int number_of_arguments, 2185 Label *retaddr) { 2186 Label E, L; 2187 2188 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 2189 2190 mov(rscratch1, RuntimeAddress(entry_point)); 2191 blr(rscratch1); 2192 if (retaddr) 2193 bind(*retaddr); 2194 2195 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 2196 } 2197 2198 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 2199 call_VM_leaf_base(entry_point, number_of_arguments); 2200 } 2201 2202 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2203 pass_arg0(this, arg_0); 2204 call_VM_leaf_base(entry_point, 1); 2205 } 2206 2207 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2208 assert_different_registers(arg_1, c_rarg0); 2209 pass_arg0(this, arg_0); 2210 pass_arg1(this, arg_1); 2211 call_VM_leaf_base(entry_point, 2); 2212 } 2213 2214 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2215 Register arg_1, Register arg_2) { 2216 assert_different_registers(arg_1, c_rarg0); 2217 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2218 pass_arg0(this, arg_0); 2219 pass_arg1(this, arg_1); 2220 pass_arg2(this, arg_2); 2221 call_VM_leaf_base(entry_point, 3); 2222 } 2223 2224 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2225 pass_arg0(this, arg_0); 2226 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2227 } 2228 2229 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2230 2231 assert_different_registers(arg_0, c_rarg1); 2232 pass_arg1(this, arg_1); 2233 pass_arg0(this, arg_0); 2234 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2235 } 2236 2237 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2238 
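  // The outgoing registers are written in reverse order below (pass_arg2,
  // then pass_arg1, then pass_arg0), so an incoming argument register must
  // not alias a c_rarg register that an earlier move has already
  // overwritten; the asserts that follow check exactly that (for example,
  // arg_0 may be c_rarg0, but not c_rarg1 or c_rarg2).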
assert_different_registers(arg_0, c_rarg1, c_rarg2); 2239 assert_different_registers(arg_1, c_rarg2); 2240 pass_arg2(this, arg_2); 2241 pass_arg1(this, arg_1); 2242 pass_arg0(this, arg_0); 2243 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2244 } 2245 2246 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2247 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2248 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2249 assert_different_registers(arg_2, c_rarg3); 2250 pass_arg3(this, arg_3); 2251 pass_arg2(this, arg_2); 2252 pass_arg1(this, arg_1); 2253 pass_arg0(this, arg_0); 2254 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2255 } 2256 2257 void MacroAssembler::null_check(Register reg, int offset) { 2258 if (needs_explicit_null_check(offset)) { 2259 // provoke OS null exception if reg is null by 2260 // accessing M[reg] w/o changing any registers 2261 // NOTE: this is plenty to provoke a segv 2262 ldr(zr, Address(reg)); 2263 } else { 2264 // nothing to do, (later) access of M[reg + offset] 2265 // will provoke OS null exception if reg is null 2266 } 2267 } 2268 2269 // MacroAssembler protected routines needed to implement 2270 // public methods 2271 2272 void MacroAssembler::mov(Register r, Address dest) { 2273 code_section()->relocate(pc(), dest.rspec()); 2274 uint64_t imm64 = (uint64_t)dest.target(); 2275 movptr(r, imm64); 2276 } 2277 2278 // Move a constant pointer into r. In AArch64 mode the virtual 2279 // address space is 48 bits in size, so we only need three 2280 // instructions to create a patchable instruction sequence that can 2281 // reach anywhere. 2282 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2283 #ifndef PRODUCT 2284 { 2285 char buffer[64]; 2286 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2287 block_comment(buffer); 2288 } 2289 #endif 2290 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2291 movz(r, imm64 & 0xffff); 2292 imm64 >>= 16; 2293 movk(r, imm64 & 0xffff, 16); 2294 imm64 >>= 16; 2295 movk(r, imm64 & 0xffff, 32); 2296 } 2297 2298 // Macro to mov replicated immediate to vector register. 2299 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2300 // the upper 56/48/32 bits must be zeros for B/H/S type. 
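// For example, a call such as mov(v0, T8H, 0x1234) replicates 0x1234 into
// every halfword of v0 (the T8H row in the table below).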
2301 // Vd will get the following values for different arrangements in T 2302 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2303 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2304 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2305 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2306 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2307 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2308 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2309 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2310 // Clobbers rscratch1 2311 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2312 assert(T != T1Q, "unsupported"); 2313 if (T == T1D || T == T2D) { 2314 int imm = operand_valid_for_movi_immediate(imm64, T); 2315 if (-1 != imm) { 2316 movi(Vd, T, imm); 2317 } else { 2318 mov(rscratch1, imm64); 2319 dup(Vd, T, rscratch1); 2320 } 2321 return; 2322 } 2323 2324 #ifdef ASSERT 2325 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2326 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2327 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2328 #endif 2329 int shift = operand_valid_for_movi_immediate(imm64, T); 2330 uint32_t imm32 = imm64 & 0xffffffffULL; 2331 if (shift >= 0) { 2332 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2333 } else { 2334 movw(rscratch1, imm32); 2335 dup(Vd, T, rscratch1); 2336 } 2337 } 2338 2339 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2340 { 2341 #ifndef PRODUCT 2342 { 2343 char buffer[64]; 2344 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2345 block_comment(buffer); 2346 } 2347 #endif 2348 if (operand_valid_for_logical_immediate(false, imm64)) { 2349 orr(dst, zr, imm64); 2350 } else { 2351 // we can use a combination of MOVZ or MOVN with 2352 // MOVK to build up the constant 2353 uint64_t imm_h[4]; 2354 int zero_count = 0; 2355 int neg_count = 0; 2356 int i; 2357 for (i = 0; i < 4; i++) { 2358 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2359 if (imm_h[i] == 0) { 2360 zero_count++; 2361 } else if (imm_h[i] == 0xffffL) { 2362 neg_count++; 2363 } 2364 } 2365 if (zero_count == 4) { 2366 // one MOVZ will do 2367 movz(dst, 0); 2368 } else if (neg_count == 4) { 2369 // one MOVN will do 2370 movn(dst, 0); 2371 } else if (zero_count == 3) { 2372 for (i = 0; i < 4; i++) { 2373 if (imm_h[i] != 0L) { 2374 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2375 break; 2376 } 2377 } 2378 } else if (neg_count == 3) { 2379 // one MOVN will do 2380 for (int i = 0; i < 4; i++) { 2381 if (imm_h[i] != 0xffffL) { 2382 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2383 break; 2384 } 2385 } 2386 } else if (zero_count == 2) { 2387 // one MOVZ and one MOVK will do 2388 for (i = 0; i < 3; i++) { 2389 if (imm_h[i] != 0L) { 2390 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2391 i++; 2392 break; 2393 } 2394 } 2395 for (;i < 4; i++) { 2396 if (imm_h[i] != 0L) { 2397 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2398 } 2399 } 2400 } else if (neg_count == 2) { 2401 // one MOVN and one MOVK will do 2402 for (i = 0; i < 4; i++) { 2403 if (imm_h[i] != 0xffffL) { 2404 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2405 i++; 2406 break; 2407 } 2408 } 2409 for (;i < 4; i++) { 2410 if (imm_h[i] != 0xffffL) { 2411 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2412 } 2413 } 2414 } else if (zero_count == 1) { 2415 // one MOVZ and two MOVKs will do 2416 for (i = 
0; i < 4; i++) { 2417 if (imm_h[i] != 0L) { 2418 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2419 i++; 2420 break; 2421 } 2422 } 2423 for (;i < 4; i++) { 2424 if (imm_h[i] != 0x0L) { 2425 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2426 } 2427 } 2428 } else if (neg_count == 1) { 2429 // one MOVN and two MOVKs will do 2430 for (i = 0; i < 4; i++) { 2431 if (imm_h[i] != 0xffffL) { 2432 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2433 i++; 2434 break; 2435 } 2436 } 2437 for (;i < 4; i++) { 2438 if (imm_h[i] != 0xffffL) { 2439 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2440 } 2441 } 2442 } else { 2443 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2444 movz(dst, (uint32_t)imm_h[0], 0); 2445 for (i = 1; i < 4; i++) { 2446 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2447 } 2448 } 2449 } 2450 } 2451 2452 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2453 { 2454 #ifndef PRODUCT 2455 { 2456 char buffer[64]; 2457 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2458 block_comment(buffer); 2459 } 2460 #endif 2461 if (operand_valid_for_logical_immediate(true, imm32)) { 2462 orrw(dst, zr, imm32); 2463 } else { 2464 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2465 // constant 2466 uint32_t imm_h[2]; 2467 imm_h[0] = imm32 & 0xffff; 2468 imm_h[1] = ((imm32 >> 16) & 0xffff); 2469 if (imm_h[0] == 0) { 2470 movzw(dst, imm_h[1], 16); 2471 } else if (imm_h[0] == 0xffff) { 2472 movnw(dst, imm_h[1] ^ 0xffff, 16); 2473 } else if (imm_h[1] == 0) { 2474 movzw(dst, imm_h[0], 0); 2475 } else if (imm_h[1] == 0xffff) { 2476 movnw(dst, imm_h[0] ^ 0xffff, 0); 2477 } else { 2478 // use a MOVZ and MOVK (makes it easier to debug) 2479 movzw(dst, imm_h[0], 0); 2480 movkw(dst, imm_h[1], 16); 2481 } 2482 } 2483 } 2484 2485 // Form an address from base + offset in Rd. Rd may or may 2486 // not actually be used: you must use the Address that is returned. 2487 // It is up to you to ensure that the shift provided matches the size 2488 // of your data. 2489 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2490 if (Address::offset_ok_for_immed(byte_offset, shift)) 2491 // It fits; no need for any heroics 2492 return Address(base, byte_offset); 2493 2494 // Don't do anything clever with negative or misaligned offsets 2495 unsigned mask = (1 << shift) - 1; 2496 if (byte_offset < 0 || byte_offset & mask) { 2497 mov(Rd, byte_offset); 2498 add(Rd, base, Rd); 2499 return Address(Rd); 2500 } 2501 2502 // See if we can do this with two 12-bit offsets 2503 { 2504 uint64_t word_offset = byte_offset >> shift; 2505 uint64_t masked_offset = word_offset & 0xfff000; 2506 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2507 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2508 add(Rd, base, masked_offset << shift); 2509 word_offset -= masked_offset; 2510 return Address(Rd, word_offset << shift); 2511 } 2512 } 2513 2514 // Do it the hard way 2515 mov(Rd, byte_offset); 2516 add(Rd, base, Rd); 2517 return Address(Rd); 2518 } 2519 2520 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2521 bool want_remainder, Register scratch) 2522 { 2523 // Full implementation of Java idiv and irem. The function 2524 // returns the (pc) offset of the div instruction - may be needed 2525 // for implicit exceptions. 
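  // (A note on the Java semantics relied on here: Java integer division
  // rounds toward zero and the remainder takes the sign of the dividend,
  // e.g. -7 / 2 == -3 and -7 % 2 == -1. Forming the remainder as
  // ra - (ra / rb) * rb with msubw below reproduces this, and AArch64 sdiv
  // returns MIN_VALUE for MIN_VALUE / -1, which is also what Java requires,
  // so no extra fix-up branch is needed.)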
2526 // 2527 // constraint : ra/rb =/= scratch 2528 // normal case 2529 // 2530 // input : ra: dividend 2531 // rb: divisor 2532 // 2533 // result: either 2534 // quotient (= ra idiv rb) 2535 // remainder (= ra irem rb) 2536 2537 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2538 2539 int idivl_offset = offset(); 2540 if (! want_remainder) { 2541 sdivw(result, ra, rb); 2542 } else { 2543 sdivw(scratch, ra, rb); 2544 Assembler::msubw(result, scratch, rb, ra); 2545 } 2546 2547 return idivl_offset; 2548 } 2549 2550 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2551 bool want_remainder, Register scratch) 2552 { 2553 // Full implementation of Java ldiv and lrem. The function 2554 // returns the (pc) offset of the div instruction - may be needed 2555 // for implicit exceptions. 2556 // 2557 // constraint : ra/rb =/= scratch 2558 // normal case 2559 // 2560 // input : ra: dividend 2561 // rb: divisor 2562 // 2563 // result: either 2564 // quotient (= ra idiv rb) 2565 // remainder (= ra irem rb) 2566 2567 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2568 2569 int idivq_offset = offset(); 2570 if (! want_remainder) { 2571 sdiv(result, ra, rb); 2572 } else { 2573 sdiv(scratch, ra, rb); 2574 Assembler::msub(result, scratch, rb, ra); 2575 } 2576 2577 return idivq_offset; 2578 } 2579 2580 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2581 address prev = pc() - NativeMembar::instruction_size; 2582 address last = code()->last_insn(); 2583 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2584 NativeMembar *bar = NativeMembar_at(prev); 2585 if (AlwaysMergeDMB) { 2586 bar->set_kind(bar->get_kind() | order_constraint); 2587 BLOCK_COMMENT("merged membar(always)"); 2588 return; 2589 } 2590 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because 2591 // doing so would introduce a StoreLoad which the caller did not 2592 // intend 2593 if (bar->get_kind() == order_constraint 2594 || bar->get_kind() == AnyAny 2595 || order_constraint == AnyAny) { 2596 // We are merging two memory barrier instructions. On AArch64 we 2597 // can do this simply by ORing them together. 2598 bar->set_kind(bar->get_kind() | order_constraint); 2599 BLOCK_COMMENT("merged membar"); 2600 return; 2601 } else { 2602 // In a special case like "DMB ST;DMB LD;DMB ST", the last DMB can be skipped. 2603 // We need to check the last two instructions. 2604 address prev2 = prev - NativeMembar::instruction_size; 2605 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) { 2606 NativeMembar *bar2 = NativeMembar_at(prev2); 2607 assert(bar2->get_kind() == order_constraint, "it should be merged before"); 2608 BLOCK_COMMENT("merged membar(elided)"); 2609 return; 2610 } 2611 } 2612 } 2613 code()->set_last_insn(pc()); 2614 dmb(Assembler::barrier(order_constraint)); 2615 } 2616 2617 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 2618 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 2619 merge_ldst(rt, adr, size_in_bytes, is_store); 2620 code()->clear_last_insn(); 2621 return true; 2622 } else { 2623 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 2624 const uint64_t mask = size_in_bytes - 1; 2625 if (adr.getMode() == Address::base_plus_offset && 2626 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2627 code()->set_last_insn(pc()); 2628 } 2629 return false; 2630 } 2631 } 2632 2633 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2634 // We always try to merge two adjacent loads into one ldp. 2635 if (!try_merge_ldst(Rx, adr, 8, false)) { 2636 Assembler::ldr(Rx, adr); 2637 } 2638 } 2639 2640 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2641 // We always try to merge two adjacent loads into one ldp. 2642 if (!try_merge_ldst(Rw, adr, 4, false)) { 2643 Assembler::ldrw(Rw, adr); 2644 } 2645 } 2646 2647 void MacroAssembler::str(Register Rx, const Address &adr) { 2648 // We always try to merge two adjacent stores into one stp. 2649 if (!try_merge_ldst(Rx, adr, 8, true)) { 2650 Assembler::str(Rx, adr); 2651 } 2652 } 2653 2654 void MacroAssembler::strw(Register Rw, const Address &adr) { 2655 // We always try to merge two adjacent stores into one stp. 2656 if (!try_merge_ldst(Rw, adr, 4, true)) { 2657 Assembler::strw(Rw, adr); 2658 } 2659 } 2660 2661 // MacroAssembler routines found actually to be needed 2662 2663 void MacroAssembler::push(Register src) 2664 { 2665 str(src, Address(pre(esp, -1 * wordSize))); 2666 } 2667 2668 void MacroAssembler::pop(Register dst) 2669 { 2670 ldr(dst, Address(post(esp, 1 * wordSize))); 2671 } 2672 2673 // Note: load_unsigned_short used to be called load_unsigned_word. 2674 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2675 int off = offset(); 2676 ldrh(dst, src); 2677 return off; 2678 } 2679 2680 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2681 int off = offset(); 2682 ldrb(dst, src); 2683 return off; 2684 } 2685 2686 int MacroAssembler::load_signed_short(Register dst, Address src) { 2687 int off = offset(); 2688 ldrsh(dst, src); 2689 return off; 2690 } 2691 2692 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2693 int off = offset(); 2694 ldrsb(dst, src); 2695 return off; 2696 } 2697 2698 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2699 int off = offset(); 2700 ldrshw(dst, src); 2701 return off; 2702 } 2703 2704 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2705 int off = offset(); 2706 ldrsbw(dst, src); 2707 return off; 2708 } 2709 2710 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2711 switch (size_in_bytes) { 2712 case 8: ldr(dst, src); break; 2713 case 4: ldrw(dst, src); break; 2714 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2715 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2716 default: ShouldNotReachHere(); 2717 } 2718 } 2719 2720 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2721 switch (size_in_bytes) { 2722 case 8: str(src, dst); break; 2723 case 4: strw(src, dst); break; 2724 case 2: strh(src, dst); break; 2725 case 1: strb(src, dst); break; 2726 default: ShouldNotReachHere(); 2727 } 2728 } 2729 2730 void MacroAssembler::decrementw(Register reg, int value) 2731 { 2732 if (value < 0) { incrementw(reg, -value); return; } 2733 if (value == 0) { return; } 2734 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2735 /* else */ { 2736 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2737 movw(rscratch2, (unsigned)value); 2738 subw(reg, reg, rscratch2); 2739 } 2740 } 2741 2742 void MacroAssembler::decrement(Register reg, int value) 2743 { 2744 if (value < 0) { increment(reg, -value); return; } 2745 if (value == 0) { return; } 2746 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2747 /* else */ { 2748 assert(reg != rscratch2, "invalid dst for register decrement"); 2749 mov(rscratch2, (uint64_t)value); 2750 sub(reg, reg, rscratch2); 2751 } 2752 } 2753 2754 void MacroAssembler::decrementw(Address dst, int value) 2755 { 2756 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2757 if (dst.getMode() == Address::literal) { 2758 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2759 lea(rscratch2, dst); 2760 dst = Address(rscratch2); 2761 } 2762 ldrw(rscratch1, dst); 2763 decrementw(rscratch1, value); 2764 strw(rscratch1, dst); 2765 } 2766 2767 void MacroAssembler::decrement(Address dst, int value) 2768 { 2769 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2770 if (dst.getMode() == Address::literal) { 2771 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2772 lea(rscratch2, dst); 2773 dst = Address(rscratch2); 2774 } 2775 ldr(rscratch1, dst); 2776 decrement(rscratch1, value); 2777 str(rscratch1, dst); 2778 } 2779 2780 void MacroAssembler::incrementw(Register reg, int value) 2781 { 2782 if (value < 0) { decrementw(reg, -value); return; } 2783 if (value == 0) { return; } 2784 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2785 /* else */ { 2786 assert(reg != rscratch2, "invalid dst for register increment"); 2787 movw(rscratch2, (unsigned)value); 2788 addw(reg, reg, rscratch2); 2789 } 2790 } 2791 2792 void MacroAssembler::increment(Register reg, int value) 2793 { 2794 if (value < 0) { decrement(reg, -value); return; } 2795 if (value == 0) { return; } 2796 if (value < (1 << 12)) { add(reg, reg, value); return; } 2797 /* else */ { 2798 assert(reg != rscratch2, "invalid dst for register increment"); 2799 movw(rscratch2, (unsigned)value); 2800 add(reg, reg, rscratch2); 2801 } 2802 } 2803 2804 void MacroAssembler::incrementw(Address dst, int value) 2805 { 2806 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2807 if (dst.getMode() == Address::literal) { 2808 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2809 lea(rscratch2, dst); 2810 dst = Address(rscratch2); 2811 } 2812 ldrw(rscratch1, dst); 2813 incrementw(rscratch1, value); 2814 strw(rscratch1, dst); 2815 } 2816 2817 void MacroAssembler::increment(Address dst, int value) 2818 { 2819 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2820 if (dst.getMode() == Address::literal) { 2821 assert(abs(value) < (1 << 12), 
"invalid value and address mode combination"); 2822 lea(rscratch2, dst); 2823 dst = Address(rscratch2); 2824 } 2825 ldr(rscratch1, dst); 2826 increment(rscratch1, value); 2827 str(rscratch1, dst); 2828 } 2829 2830 // Push lots of registers in the bit set supplied. Don't push sp. 2831 // Return the number of words pushed 2832 int MacroAssembler::push(unsigned int bitset, Register stack) { 2833 int words_pushed = 0; 2834 2835 // Scan bitset to accumulate register pairs 2836 unsigned char regs[32]; 2837 int count = 0; 2838 for (int reg = 0; reg <= 30; reg++) { 2839 if (1 & bitset) 2840 regs[count++] = reg; 2841 bitset >>= 1; 2842 } 2843 regs[count++] = zr->raw_encoding(); 2844 count &= ~1; // Only push an even number of regs 2845 2846 if (count) { 2847 stp(as_Register(regs[0]), as_Register(regs[1]), 2848 Address(pre(stack, -count * wordSize))); 2849 words_pushed += 2; 2850 } 2851 for (int i = 2; i < count; i += 2) { 2852 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2853 Address(stack, i * wordSize)); 2854 words_pushed += 2; 2855 } 2856 2857 assert(words_pushed == count, "oops, pushed != count"); 2858 2859 return count; 2860 } 2861 2862 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2863 int words_pushed = 0; 2864 2865 // Scan bitset to accumulate register pairs 2866 unsigned char regs[32]; 2867 int count = 0; 2868 for (int reg = 0; reg <= 30; reg++) { 2869 if (1 & bitset) 2870 regs[count++] = reg; 2871 bitset >>= 1; 2872 } 2873 regs[count++] = zr->raw_encoding(); 2874 count &= ~1; 2875 2876 for (int i = 2; i < count; i += 2) { 2877 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2878 Address(stack, i * wordSize)); 2879 words_pushed += 2; 2880 } 2881 if (count) { 2882 ldp(as_Register(regs[0]), as_Register(regs[1]), 2883 Address(post(stack, count * wordSize))); 2884 words_pushed += 2; 2885 } 2886 2887 assert(words_pushed == count, "oops, pushed != count"); 2888 2889 return count; 2890 } 2891 2892 // Push lots of registers in the bit set supplied. Don't push sp. 
2893 // Return the number of dwords pushed 2894 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2895 int words_pushed = 0; 2896 bool use_sve = false; 2897 int sve_vector_size_in_bytes = 0; 2898 2899 #ifdef COMPILER2 2900 use_sve = Matcher::supports_scalable_vector(); 2901 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2902 #endif 2903 2904 // Scan bitset to accumulate register pairs 2905 unsigned char regs[32]; 2906 int count = 0; 2907 for (int reg = 0; reg <= 31; reg++) { 2908 if (1 & bitset) 2909 regs[count++] = reg; 2910 bitset >>= 1; 2911 } 2912 2913 if (count == 0) { 2914 return 0; 2915 } 2916 2917 if (mode == PushPopFull) { 2918 if (use_sve && sve_vector_size_in_bytes > 16) { 2919 mode = PushPopSVE; 2920 } else { 2921 mode = PushPopNeon; 2922 } 2923 } 2924 2925 #ifndef PRODUCT 2926 { 2927 char buffer[48]; 2928 if (mode == PushPopSVE) { 2929 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2930 } else if (mode == PushPopNeon) { 2931 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2932 } else { 2933 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2934 } 2935 block_comment(buffer); 2936 } 2937 #endif 2938 2939 if (mode == PushPopSVE) { 2940 sub(stack, stack, sve_vector_size_in_bytes * count); 2941 for (int i = 0; i < count; i++) { 2942 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2943 } 2944 return count * sve_vector_size_in_bytes / 8; 2945 } 2946 2947 if (mode == PushPopNeon) { 2948 if (count == 1) { 2949 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2950 return 2; 2951 } 2952 2953 bool odd = (count & 1) == 1; 2954 int push_slots = count + (odd ? 1 : 0); 2955 2956 // Always pushing full 128 bit registers. 2957 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2958 words_pushed += 2; 2959 2960 for (int i = 2; i + 1 < count; i += 2) { 2961 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2962 words_pushed += 2; 2963 } 2964 2965 if (odd) { 2966 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2967 words_pushed++; 2968 } 2969 2970 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2971 return count * 2; 2972 } 2973 2974 if (mode == PushPopFp) { 2975 bool odd = (count & 1) == 1; 2976 int push_slots = count + (odd ? 
1 : 0); 2977 2978 if (count == 1) { 2979 // Stack pointer must be 16 bytes aligned 2980 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2981 return 1; 2982 } 2983 2984 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2985 words_pushed += 2; 2986 2987 for (int i = 2; i + 1 < count; i += 2) { 2988 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2989 words_pushed += 2; 2990 } 2991 2992 if (odd) { 2993 // Stack pointer must be 16 bytes aligned 2994 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2995 words_pushed++; 2996 } 2997 2998 assert(words_pushed == count, "oops, pushed != count"); 2999 3000 return count; 3001 } 3002 3003 return 0; 3004 } 3005 3006 // Return the number of dwords popped 3007 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 3008 int words_pushed = 0; 3009 bool use_sve = false; 3010 int sve_vector_size_in_bytes = 0; 3011 3012 #ifdef COMPILER2 3013 use_sve = Matcher::supports_scalable_vector(); 3014 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 3015 #endif 3016 // Scan bitset to accumulate register pairs 3017 unsigned char regs[32]; 3018 int count = 0; 3019 for (int reg = 0; reg <= 31; reg++) { 3020 if (1 & bitset) 3021 regs[count++] = reg; 3022 bitset >>= 1; 3023 } 3024 3025 if (count == 0) { 3026 return 0; 3027 } 3028 3029 if (mode == PushPopFull) { 3030 if (use_sve && sve_vector_size_in_bytes > 16) { 3031 mode = PushPopSVE; 3032 } else { 3033 mode = PushPopNeon; 3034 } 3035 } 3036 3037 #ifndef PRODUCT 3038 { 3039 char buffer[48]; 3040 if (mode == PushPopSVE) { 3041 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 3042 } else if (mode == PushPopNeon) { 3043 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 3044 } else { 3045 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 3046 } 3047 block_comment(buffer); 3048 } 3049 #endif 3050 3051 if (mode == PushPopSVE) { 3052 for (int i = count - 1; i >= 0; i--) { 3053 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 3054 } 3055 add(stack, stack, sve_vector_size_in_bytes * count); 3056 return count * sve_vector_size_in_bytes / 8; 3057 } 3058 3059 if (mode == PushPopNeon) { 3060 if (count == 1) { 3061 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 3062 return 2; 3063 } 3064 3065 bool odd = (count & 1) == 1; 3066 int push_slots = count + (odd ? 1 : 0); 3067 3068 if (odd) { 3069 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 3070 words_pushed++; 3071 } 3072 3073 for (int i = 2; i + 1 < count; i += 2) { 3074 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 3075 words_pushed += 2; 3076 } 3077 3078 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 3079 words_pushed += 2; 3080 3081 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 3082 3083 return count * 2; 3084 } 3085 3086 if (mode == PushPopFp) { 3087 bool odd = (count & 1) == 1; 3088 int push_slots = count + (odd ? 
1 : 0); 3089 3090 if (count == 1) { 3091 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 3092 return 1; 3093 } 3094 3095 if (odd) { 3096 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 3097 words_pushed++; 3098 } 3099 3100 for (int i = 2; i + 1 < count; i += 2) { 3101 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 3102 words_pushed += 2; 3103 } 3104 3105 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 3106 words_pushed += 2; 3107 3108 assert(words_pushed == count, "oops, pushed != count"); 3109 3110 return count; 3111 } 3112 3113 return 0; 3114 } 3115 3116 // Return the number of dwords pushed 3117 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 3118 bool use_sve = false; 3119 int sve_predicate_size_in_slots = 0; 3120 3121 #ifdef COMPILER2 3122 use_sve = Matcher::supports_scalable_vector(); 3123 if (use_sve) { 3124 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3125 } 3126 #endif 3127 3128 if (!use_sve) { 3129 return 0; 3130 } 3131 3132 unsigned char regs[PRegister::number_of_registers]; 3133 int count = 0; 3134 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3135 if (1 & bitset) 3136 regs[count++] = reg; 3137 bitset >>= 1; 3138 } 3139 3140 if (count == 0) { 3141 return 0; 3142 } 3143 3144 int total_push_bytes = align_up(sve_predicate_size_in_slots * 3145 VMRegImpl::stack_slot_size * count, 16); 3146 sub(stack, stack, total_push_bytes); 3147 for (int i = 0; i < count; i++) { 3148 sve_str(as_PRegister(regs[i]), Address(stack, i)); 3149 } 3150 return total_push_bytes / 8; 3151 } 3152 3153 // Return the number of dwords popped 3154 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 3155 bool use_sve = false; 3156 int sve_predicate_size_in_slots = 0; 3157 3158 #ifdef COMPILER2 3159 use_sve = Matcher::supports_scalable_vector(); 3160 if (use_sve) { 3161 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3162 } 3163 #endif 3164 3165 if (!use_sve) { 3166 return 0; 3167 } 3168 3169 unsigned char regs[PRegister::number_of_registers]; 3170 int count = 0; 3171 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3172 if (1 & bitset) 3173 regs[count++] = reg; 3174 bitset >>= 1; 3175 } 3176 3177 if (count == 0) { 3178 return 0; 3179 } 3180 3181 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 3182 VMRegImpl::stack_slot_size * count, 16); 3183 for (int i = count - 1; i >= 0; i--) { 3184 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 3185 } 3186 add(stack, stack, total_pop_bytes); 3187 return total_pop_bytes / 8; 3188 } 3189 3190 #ifdef ASSERT 3191 void MacroAssembler::verify_heapbase(const char* msg) { 3192 #if 0 3193 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 3194 assert (Universe::heap() != nullptr, "java heap should be initialized"); 3195 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 3196 // rheapbase is allocated as general register 3197 return; 3198 } 3199 if (CheckCompressedOops) { 3200 Label ok; 3201 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 3202 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3203 br(Assembler::EQ, ok); 3204 stop(msg); 3205 bind(ok); 3206 pop(1 << rscratch1->encoding(), sp); 3207 } 3208 #endif 3209 } 3210 #endif 3211 3212 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 
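  // Informal summary of the tag scheme decoded below (see JNIHandles::TypeTag):
  //   value == 0                        -> return null as-is
  //   low tag bits == 0                 -> local handle:       load *value
  //   bit 0 set (weak_global == 0b01)   -> weak global handle: load *(value - 1), phantom ref
  //   otherwise  (global == 0b10)       -> global handle:      load *(value - 2)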
3213 assert_different_registers(value, tmp1, tmp2); 3214 Label done, tagged, weak_tagged; 3215 3216 cbz(value, done); // Use null as-is. 3217 tst(value, JNIHandles::tag_mask); // Test for tag. 3218 br(Assembler::NE, tagged); 3219 3220 // Resolve local handle 3221 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3222 verify_oop(value); 3223 b(done); 3224 3225 bind(tagged); 3226 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3227 tbnz(value, 0, weak_tagged); // Test for weak tag. 3228 3229 // Resolve global handle 3230 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3231 verify_oop(value); 3232 b(done); 3233 3234 bind(weak_tagged); 3235 // Resolve jweak. 3236 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3237 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3238 verify_oop(value); 3239 3240 bind(done); 3241 } 3242 3243 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3244 assert_different_registers(value, tmp1, tmp2); 3245 Label done; 3246 3247 cbz(value, done); // Use null as-is. 3248 3249 #ifdef ASSERT 3250 { 3251 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3252 Label valid_global_tag; 3253 tbnz(value, 1, valid_global_tag); // Test for global tag 3254 stop("non global jobject using resolve_global_jobject"); 3255 bind(valid_global_tag); 3256 } 3257 #endif 3258 3259 // Resolve global handle 3260 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3261 verify_oop(value); 3262 3263 bind(done); 3264 } 3265 3266 void MacroAssembler::stop(const char* msg) { 3267 BLOCK_COMMENT(msg); 3268 // load msg into r0 so we can access it from the signal handler 3269 // ExternalAddress enables saving and restoring via the code cache 3270 lea(c_rarg0, ExternalAddress((address) msg)); 3271 dcps1(0xdeae); 3272 SCCache::add_C_string(msg); 3273 } 3274 3275 void MacroAssembler::unimplemented(const char* what) { 3276 const char* buf = nullptr; 3277 { 3278 ResourceMark rm; 3279 stringStream ss; 3280 ss.print("unimplemented: %s", what); 3281 buf = code_string(ss.as_string()); 3282 } 3283 stop(buf); 3284 } 3285 3286 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3287 #ifdef ASSERT 3288 Label OK; 3289 br(cc, OK); 3290 stop(msg); 3291 bind(OK); 3292 #endif 3293 } 3294 3295 // If a constant does not fit in an immediate field, generate some 3296 // number of MOV instructions and then perform the operation. 3297 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3298 add_sub_imm_insn insn1, 3299 add_sub_reg_insn insn2, 3300 bool is32) { 3301 assert(Rd != zr, "Rd = zr and not setting flags?"); 3302 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3303 if (fits) { 3304 (this->*insn1)(Rd, Rn, imm); 3305 } else { 3306 if (uabs(imm) < (1 << 24)) { 3307 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3308 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3309 } else { 3310 assert_different_registers(Rd, Rn); 3311 mov(Rd, imm); 3312 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3313 } 3314 } 3315 } 3316 3317 // Separate vsn which sets the flags. Optimisations are more restricted 3318 // because we must set the flags correctly. 
3319 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3320 add_sub_imm_insn insn1, 3321 add_sub_reg_insn insn2, 3322 bool is32) { 3323 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3324 if (fits) { 3325 (this->*insn1)(Rd, Rn, imm); 3326 } else { 3327 assert_different_registers(Rd, Rn); 3328 assert(Rd != zr, "overflow in immediate operand"); 3329 mov(Rd, imm); 3330 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3331 } 3332 } 3333 3334 3335 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3336 if (increment.is_register()) { 3337 add(Rd, Rn, increment.as_register()); 3338 } else { 3339 add(Rd, Rn, increment.as_constant()); 3340 } 3341 } 3342 3343 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3344 if (increment.is_register()) { 3345 addw(Rd, Rn, increment.as_register()); 3346 } else { 3347 addw(Rd, Rn, increment.as_constant()); 3348 } 3349 } 3350 3351 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3352 if (decrement.is_register()) { 3353 sub(Rd, Rn, decrement.as_register()); 3354 } else { 3355 sub(Rd, Rn, decrement.as_constant()); 3356 } 3357 } 3358 3359 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3360 if (decrement.is_register()) { 3361 subw(Rd, Rn, decrement.as_register()); 3362 } else { 3363 subw(Rd, Rn, decrement.as_constant()); 3364 } 3365 } 3366 3367 void MacroAssembler::reinit_heapbase() 3368 { 3369 if (UseCompressedOops) { 3370 if (Universe::is_fully_initialized() && !SCCache::is_on_for_write()) { 3371 mov(rheapbase, CompressedOops::base()); 3372 } else { 3373 lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3374 ldr(rheapbase, Address(rheapbase)); 3375 } 3376 } 3377 } 3378 3379 // this simulates the behaviour of the x86 cmpxchg instruction using a 3380 // load linked/store conditional pair. we use the acquire/release 3381 // versions of these instructions so that we flush pending writes as 3382 // per Java semantics. 3383 3384 // n.b the x86 version assumes the old value to be compared against is 3385 // in rax and updates rax with the value located in memory if the 3386 // cmpxchg fails. we supply a register for the old value explicitly 3387 3388 // the aarch64 load linked/store conditional instructions do not 3389 // accept an offset. so, unlike x86, we must provide a plain register 3390 // to identify the memory word to be compared/exchanged rather than a 3391 // register+offset Address. 3392 3393 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3394 Label &succeed, Label *fail) { 3395 // oldv holds comparison value 3396 // newv holds value to write in exchange 3397 // addr identifies memory word to compare against/update 3398 if (UseLSE) { 3399 mov(tmp, oldv); 3400 casal(Assembler::xword, oldv, newv, addr); 3401 cmp(tmp, oldv); 3402 br(Assembler::EQ, succeed); 3403 membar(AnyAny); 3404 } else { 3405 Label retry_load, nope; 3406 prfm(Address(addr), PSTL1STRM); 3407 bind(retry_load); 3408 // flush and load exclusive from the memory location 3409 // and fail if it is not what we expect 3410 ldaxr(tmp, addr); 3411 cmp(tmp, oldv); 3412 br(Assembler::NE, nope); 3413 // if we store+flush with no intervening write tmp will be zero 3414 stlxr(tmp, newv, addr); 3415 cbzw(tmp, succeed); 3416 // retry so we only ever return after a load fails to compare 3417 // ensures we don't return a stale value after a failed write. 
3418 b(retry_load); 3419 // if the memory word differs we return it in oldv and signal a fail 3420 bind(nope); 3421 membar(AnyAny); 3422 mov(oldv, tmp); 3423 } 3424 if (fail) 3425 b(*fail); 3426 } 3427 3428 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 3429 Label &succeed, Label *fail) { 3430 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 3431 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 3432 } 3433 3434 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 3435 Label &succeed, Label *fail) { 3436 // oldv holds comparison value 3437 // newv holds value to write in exchange 3438 // addr identifies memory word to compare against/update 3439 // tmp returns 0/1 for success/failure 3440 if (UseLSE) { 3441 mov(tmp, oldv); 3442 casal(Assembler::word, oldv, newv, addr); 3443 cmp(tmp, oldv); 3444 br(Assembler::EQ, succeed); 3445 membar(AnyAny); 3446 } else { 3447 Label retry_load, nope; 3448 prfm(Address(addr), PSTL1STRM); 3449 bind(retry_load); 3450 // flush and load exclusive from the memory location 3451 // and fail if it is not what we expect 3452 ldaxrw(tmp, addr); 3453 cmp(tmp, oldv); 3454 br(Assembler::NE, nope); 3455 // if we store+flush with no intervening write tmp will be zero 3456 stlxrw(tmp, newv, addr); 3457 cbzw(tmp, succeed); 3458 // retry so we only ever return after a load fails to compare 3459 // ensures we don't return a stale value after a failed write. 3460 b(retry_load); 3461 // if the memory word differs we return it in oldv and signal a fail 3462 bind(nope); 3463 membar(AnyAny); 3464 mov(oldv, tmp); 3465 } 3466 if (fail) 3467 b(*fail); 3468 } 3469 3470 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3471 // doesn't retry and may fail spuriously. If the oldval is wanted, 3472 // Pass a register for the result, otherwise pass noreg. 3473 3474 // Clobbers rscratch1 3475 void MacroAssembler::cmpxchg(Register addr, Register expected, 3476 Register new_val, 3477 enum operand_size size, 3478 bool acquire, bool release, 3479 bool weak, 3480 Register result) { 3481 if (result == noreg) result = rscratch1; 3482 BLOCK_COMMENT("cmpxchg {"); 3483 if (UseLSE) { 3484 mov(result, expected); 3485 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3486 compare_eq(result, expected, size); 3487 #ifdef ASSERT 3488 // Poison rscratch1 which is written on !UseLSE branch 3489 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3490 #endif 3491 } else { 3492 Label retry_load, done; 3493 prfm(Address(addr), PSTL1STRM); 3494 bind(retry_load); 3495 load_exclusive(result, addr, size, acquire); 3496 compare_eq(result, expected, size); 3497 br(Assembler::NE, done); 3498 store_exclusive(rscratch1, new_val, addr, size, release); 3499 if (weak) { 3500 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3501 } else { 3502 cbnzw(rscratch1, retry_load); 3503 } 3504 bind(done); 3505 } 3506 BLOCK_COMMENT("} cmpxchg"); 3507 } 3508 3509 // A generic comparison. Only compares for equality, clobbers rscratch1. 
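// There is no sub-word compare instruction, so for halfword and byte
// operands the two registers are XORed and the low 16 or 8 bits of the
// result are tested against zero with ands; only the Z flag is
// meaningful to callers afterwards.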
3510 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3511 if (size == xword) { 3512 cmp(rm, rn); 3513 } else if (size == word) { 3514 cmpw(rm, rn); 3515 } else if (size == halfword) { 3516 eorw(rscratch1, rm, rn); 3517 ands(zr, rscratch1, 0xffff); 3518 } else if (size == byte) { 3519 eorw(rscratch1, rm, rn); 3520 ands(zr, rscratch1, 0xff); 3521 } else { 3522 ShouldNotReachHere(); 3523 } 3524 } 3525 3526 3527 static bool different(Register a, RegisterOrConstant b, Register c) { 3528 if (b.is_constant()) 3529 return a != c; 3530 else 3531 return a != b.as_register() && a != c && b.as_register() != c; 3532 } 3533 3534 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3535 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3536 if (UseLSE) { \ 3537 prev = prev->is_valid() ? prev : zr; \ 3538 if (incr.is_register()) { \ 3539 AOP(sz, incr.as_register(), prev, addr); \ 3540 } else { \ 3541 mov(rscratch2, incr.as_constant()); \ 3542 AOP(sz, rscratch2, prev, addr); \ 3543 } \ 3544 return; \ 3545 } \ 3546 Register result = rscratch2; \ 3547 if (prev->is_valid()) \ 3548 result = different(prev, incr, addr) ? prev : rscratch2; \ 3549 \ 3550 Label retry_load; \ 3551 prfm(Address(addr), PSTL1STRM); \ 3552 bind(retry_load); \ 3553 LDXR(result, addr); \ 3554 OP(rscratch1, result, incr); \ 3555 STXR(rscratch2, rscratch1, addr); \ 3556 cbnzw(rscratch2, retry_load); \ 3557 if (prev->is_valid() && prev != result) { \ 3558 IOP(prev, rscratch1, incr); \ 3559 } \ 3560 } 3561 3562 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3563 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3564 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3565 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3566 3567 #undef ATOMIC_OP 3568 3569 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3570 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3571 if (UseLSE) { \ 3572 prev = prev->is_valid() ? prev : zr; \ 3573 AOP(sz, newv, prev, addr); \ 3574 return; \ 3575 } \ 3576 Register result = rscratch2; \ 3577 if (prev->is_valid()) \ 3578 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 3579 \ 3580 Label retry_load; \ 3581 prfm(Address(addr), PSTL1STRM); \ 3582 bind(retry_load); \ 3583 LDXR(result, addr); \ 3584 STXR(rscratch1, newv, addr); \ 3585 cbnzw(rscratch1, retry_load); \ 3586 if (prev->is_valid() && prev != result) \ 3587 mov(prev, result); \ 3588 } 3589 3590 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) 3591 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) 3592 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) 3593 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) 3594 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) 3595 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) 3596 3597 #undef ATOMIC_XCHG 3598 3599 #ifndef PRODUCT 3600 extern "C" void findpc(intptr_t x); 3601 #endif 3602 3603 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 3604 { 3605 // In order to get locks to work, we need to fake a in_VM state 3606 if (ShowMessageBoxOnError ) { 3607 JavaThread* thread = JavaThread::current(); 3608 JavaThreadState saved_state = thread->thread_state(); 3609 thread->set_thread_state(_thread_in_vm); 3610 #ifndef PRODUCT 3611 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 3612 ttyLocker ttyl; 3613 BytecodeCounter::print(); 3614 } 3615 #endif 3616 if (os::message_box(msg, "Execution stopped, print registers?")) { 3617 ttyLocker ttyl; 3618 tty->print_cr(" pc = 0x%016" PRIx64, pc); 3619 #ifndef PRODUCT 3620 tty->cr(); 3621 findpc(pc); 3622 tty->cr(); 3623 #endif 3624 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]); 3625 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]); 3626 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]); 3627 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]); 3628 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]); 3629 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]); 3630 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]); 3631 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]); 3632 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]); 3633 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]); 3634 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]); 3635 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]); 3636 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]); 3637 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]); 3638 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]); 3639 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]); 3640 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]); 3641 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]); 3642 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]); 3643 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]); 3644 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]); 3645 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]); 3646 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]); 3647 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]); 3648 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]); 3649 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]); 3650 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]); 3651 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]); 3652 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]); 3653 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]); 3654 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]); 3655 BREAKPOINT; 3656 } 3657 } 3658 fatal("DEBUG MESSAGE: %s", msg); 3659 } 3660 3661 RegSet MacroAssembler::call_clobbered_gp_registers() { 3662 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2); 3663 #ifndef R18_RESERVED 3664 regs += r18_tls; 3665 #endif 3666 return regs; 3667 } 3668 3669 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { 3670 int step = 4 * 
wordSize; 3671 push(call_clobbered_gp_registers() - exclude, sp); 3672 sub(sp, sp, step); 3673 mov(rscratch1, -step); 3674 // Push v0-v7, v16-v31. 3675 for (int i = 31; i>= 4; i -= 4) { 3676 if (i <= v7->encoding() || i >= v16->encoding()) 3677 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3678 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3679 } 3680 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3681 as_FloatRegister(3), T1D, Address(sp)); 3682 } 3683 3684 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3685 for (int i = 0; i < 32; i += 4) { 3686 if (i <= v7->encoding() || i >= v16->encoding()) 3687 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3688 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3689 } 3690 3691 reinitialize_ptrue(); 3692 3693 pop(call_clobbered_gp_registers() - exclude, sp); 3694 } 3695 3696 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3697 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3698 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3699 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3700 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3701 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3702 sve_str(as_FloatRegister(i), Address(sp, i)); 3703 } 3704 } else { 3705 int step = (save_vectors ? 8 : 4) * wordSize; 3706 mov(rscratch1, -step); 3707 sub(sp, sp, step); 3708 for (int i = 28; i >= 4; i -= 4) { 3709 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3710 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3711 } 3712 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3713 } 3714 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3715 sub(sp, sp, total_predicate_in_bytes); 3716 for (int i = 0; i < PRegister::number_of_registers; i++) { 3717 sve_str(as_PRegister(i), Address(sp, i)); 3718 } 3719 } 3720 } 3721 3722 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3723 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3724 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3725 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3726 sve_ldr(as_PRegister(i), Address(sp, i)); 3727 } 3728 add(sp, sp, total_predicate_in_bytes); 3729 } 3730 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3731 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3732 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3733 } 3734 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3735 } else { 3736 int step = (restore_vectors ? 8 : 4) * wordSize; 3737 for (int i = 0; i <= 28; i += 4) 3738 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3739 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3740 } 3741 3742 // We may use predicate registers and rely on ptrue with SVE, 3743 // regardless of wide vector (> 8 bytes) used or not. 3744 if (use_sve) { 3745 reinitialize_ptrue(); 3746 } 3747 3748 // integer registers except lr & sp 3749 pop(RegSet::range(r0, r17), sp); 3750 #ifdef R18_RESERVED 3751 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3752 pop(RegSet::range(r20, r29), sp); 3753 #else 3754 pop(RegSet::range(r18_tls, r29), sp); 3755 #endif 3756 } 3757 3758 /** 3759 * Helpers for multiply_to_len(). 
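 *
 * add2_with_carry computes, in effect,
 *   final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2
 * where each adds is followed by an adc against zr to propagate the
 * carry into the high word.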
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset).  "size" is the
// size of the operand.  The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1u<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}

Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
  assert(offset >= 0, "spill to negative address?");

  Register base = sp;

  // An immediate offset in the range 0 to 255 which is multiplied
  // by the current vector or predicate register size in bytes.
  if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
    return Address(base, offset / sve_reg_size_in_bytes);
  }

  add(tmp, base, offset);
  return Address(tmp);
}

// Checks whether offset is aligned.
// Returns true if it is, else false.
bool MacroAssembler::merge_alignment_check(Register base,
                                           size_t size,
                                           int64_t cur_offset,
                                           int64_t prev_offset) const {
  if (AvoidUnalignedAccesses) {
    if (base == sp) {
      // Checks whether the low offset is aligned to a pair of registers.
      int64_t pair_mask = size * 2 - 1;
      int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
      return (offset & pair_mask) == 0;
    } else { // If base is not sp, we can't guarantee the access is aligned.
      return false;
    }
  } else {
    int64_t mask = size - 1;
    // Load/store pair instructions only support element-size-aligned offsets.
    return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
  }
}

// Checks whether current and previous loads/stores can be merged.
// Returns true if they can be merged, else false.
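// Merging requires that the previously emitted instruction is an
// immediate-offset ldr/str of the same 4- or 8-byte size and the same
// load/store kind, that both use base_plus_offset addressing off the same
// base register, that the two offsets are adjacent (differ by exactly one
// element) and lie within the ldp/stp offset range, and that
// merge_alignment_check passes.  Loads where the previous destination is
// reused as the current base, or where both loads target the same
// register, are rejected because the resulting ldp would be invalid.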
3842 bool MacroAssembler::ldst_can_merge(Register rt, 3843 const Address &adr, 3844 size_t cur_size_in_bytes, 3845 bool is_store) const { 3846 address prev = pc() - NativeInstruction::instruction_size; 3847 address last = code()->last_insn(); 3848 3849 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) { 3850 return false; 3851 } 3852 3853 if (adr.getMode() != Address::base_plus_offset || prev != last) { 3854 return false; 3855 } 3856 3857 NativeLdSt* prev_ldst = NativeLdSt_at(prev); 3858 size_t prev_size_in_bytes = prev_ldst->size_in_bytes(); 3859 3860 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging."); 3861 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging."); 3862 3863 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) { 3864 return false; 3865 } 3866 3867 int64_t max_offset = 63 * prev_size_in_bytes; 3868 int64_t min_offset = -64 * prev_size_in_bytes; 3869 3870 assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged."); 3871 3872 // Only same base can be merged. 3873 if (adr.base() != prev_ldst->base()) { 3874 return false; 3875 } 3876 3877 int64_t cur_offset = adr.offset(); 3878 int64_t prev_offset = prev_ldst->offset(); 3879 size_t diff = abs(cur_offset - prev_offset); 3880 if (diff != prev_size_in_bytes) { 3881 return false; 3882 } 3883 3884 // Following cases can not be merged: 3885 // ldr x2, [x2, #8] 3886 // ldr x3, [x2, #16] 3887 // or: 3888 // ldr x2, [x3, #8] 3889 // ldr x2, [x3, #16] 3890 // If t1 and t2 is the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL. 3891 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) { 3892 return false; 3893 } 3894 3895 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset; 3896 // Offset range must be in ldp/stp instruction's range. 3897 if (low_offset > max_offset || low_offset < min_offset) { 3898 return false; 3899 } 3900 3901 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) { 3902 return true; 3903 } 3904 3905 return false; 3906 } 3907 3908 // Merge current load/store with previous load/store into ldp/stp. 3909 void MacroAssembler::merge_ldst(Register rt, 3910 const Address &adr, 3911 size_t cur_size_in_bytes, 3912 bool is_store) { 3913 3914 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged."); 3915 3916 Register rt_low, rt_high; 3917 address prev = pc() - NativeInstruction::instruction_size; 3918 NativeLdSt* prev_ldst = NativeLdSt_at(prev); 3919 3920 int64_t offset; 3921 3922 if (adr.offset() < prev_ldst->offset()) { 3923 offset = adr.offset(); 3924 rt_low = rt; 3925 rt_high = prev_ldst->target(); 3926 } else { 3927 offset = prev_ldst->offset(); 3928 rt_low = prev_ldst->target(); 3929 rt_high = rt; 3930 } 3931 3932 Address adr_p = Address(prev_ldst->base(), offset); 3933 // Overwrite previous generated binary. 
  code_section()->set_end(prev);

  const size_t sz = prev_ldst->size_in_bytes();
  assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
  if (!is_store) {
    BLOCK_COMMENT("merged ldr pair");
    if (sz == 8) {
      ldp(rt_low, rt_high, adr_p);
    } else {
      ldpw(rt_low, rt_high, adr_p);
    }
  } else {
    BLOCK_COMMENT("merged str pair");
    if (sz == 8) {
      stp(rt_low, rt_high, adr_p);
    } else {
      stpw(rt_low, rt_high, adr_p);
    }
  }
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions.  Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
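 * Each iteration of the unrolled loop consumes four 32-bit digits of y
 * (loaded as two 64-bit words) together with the matching words of z,
 * and accumulates the partial products back into z with full carry
 * propagation.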
4019 * 4020 */ 4021 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 4022 Register carry, Register carry2, 4023 Register idx, Register jdx, 4024 Register yz_idx1, Register yz_idx2, 4025 Register tmp, Register tmp3, Register tmp4, 4026 Register tmp6, Register product_hi) { 4027 4028 // jlong carry, x[], y[], z[]; 4029 // int kdx = ystart+1; 4030 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 4031 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 4032 // jlong carry2 = (jlong)(tmp3 >>> 64); 4033 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 4034 // carry = (jlong)(tmp4 >>> 64); 4035 // z[kdx+idx+1] = (jlong)tmp3; 4036 // z[kdx+idx] = (jlong)tmp4; 4037 // } 4038 // idx += 2; 4039 // if (idx > 0) { 4040 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 4041 // z[kdx+idx] = (jlong)yz_idx1; 4042 // carry = (jlong)(yz_idx1 >>> 64); 4043 // } 4044 // 4045 4046 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 4047 4048 lsrw(jdx, idx, 2); 4049 4050 bind(L_third_loop); 4051 4052 subsw(jdx, jdx, 1); 4053 br(Assembler::MI, L_third_loop_exit); 4054 subw(idx, idx, 4); 4055 4056 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4057 4058 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 4059 4060 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4061 4062 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 4063 ror(yz_idx2, yz_idx2, 32); 4064 4065 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 4066 4067 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4068 umulh(tmp4, product_hi, yz_idx1); 4069 4070 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 4071 ror(rscratch2, rscratch2, 32); 4072 4073 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 4074 umulh(carry2, product_hi, yz_idx2); 4075 4076 // propagate sum of both multiplications into carry:tmp4:tmp3 4077 adds(tmp3, tmp3, carry); 4078 adc(tmp4, tmp4, zr); 4079 adds(tmp3, tmp3, rscratch1); 4080 adcs(tmp4, tmp4, tmp); 4081 adc(carry, carry2, zr); 4082 adds(tmp4, tmp4, rscratch2); 4083 adc(carry, carry, zr); 4084 4085 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 4086 ror(tmp4, tmp4, 32); 4087 stp(tmp4, tmp3, Address(tmp6, 0)); 4088 4089 b(L_third_loop); 4090 bind (L_third_loop_exit); 4091 4092 andw (idx, idx, 0x3); 4093 cbz(idx, L_post_third_loop_done); 4094 4095 Label L_check_1; 4096 subsw(idx, idx, 2); 4097 br(Assembler::MI, L_check_1); 4098 4099 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4100 ldr(yz_idx1, Address(rscratch1, 0)); 4101 ror(yz_idx1, yz_idx1, 32); 4102 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4103 umulh(tmp4, product_hi, yz_idx1); 4104 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4105 ldr(yz_idx2, Address(rscratch1, 0)); 4106 ror(yz_idx2, yz_idx2, 32); 4107 4108 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 4109 4110 ror(tmp3, tmp3, 32); 4111 str(tmp3, Address(rscratch1, 0)); 4112 4113 bind (L_check_1); 4114 4115 andw (idx, idx, 0x1); 4116 subsw(idx, idx, 1); 4117 br(Assembler::MI, L_post_third_loop_done); 4118 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4119 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 4120 umulh(carry2, tmp4, product_hi); 4121 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4122 4123 add2_with_carry(carry2, tmp3, tmp4, carry); 4124 4125 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4126 
extr(carry, carry2, tmp3, 32); 4127 4128 bind(L_post_third_loop_done); 4129 } 4130 4131 /** 4132 * Code for BigInteger::multiplyToLen() intrinsic. 4133 * 4134 * r0: x 4135 * r1: xlen 4136 * r2: y 4137 * r3: ylen 4138 * r4: z 4139 * r5: tmp0 4140 * r10: tmp1 4141 * r11: tmp2 4142 * r12: tmp3 4143 * r13: tmp4 4144 * r14: tmp5 4145 * r15: tmp6 4146 * r16: tmp7 4147 * 4148 */ 4149 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 4150 Register z, Register tmp0, 4151 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 4152 Register tmp5, Register tmp6, Register product_hi) { 4153 4154 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi); 4155 4156 const Register idx = tmp1; 4157 const Register kdx = tmp2; 4158 const Register xstart = tmp3; 4159 4160 const Register y_idx = tmp4; 4161 const Register carry = tmp5; 4162 const Register product = xlen; 4163 const Register x_xstart = tmp0; 4164 4165 // First Loop. 4166 // 4167 // final static long LONG_MASK = 0xffffffffL; 4168 // int xstart = xlen - 1; 4169 // int ystart = ylen - 1; 4170 // long carry = 0; 4171 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 4172 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 4173 // z[kdx] = (int)product; 4174 // carry = product >>> 32; 4175 // } 4176 // z[xstart] = (int)carry; 4177 // 4178 4179 movw(idx, ylen); // idx = ylen; 4180 addw(kdx, xlen, ylen); // kdx = xlen+ylen; 4181 mov(carry, zr); // carry = 0; 4182 4183 Label L_done; 4184 4185 movw(xstart, xlen); 4186 subsw(xstart, xstart, 1); 4187 br(Assembler::MI, L_done); 4188 4189 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 4190 4191 Label L_second_loop; 4192 cbzw(kdx, L_second_loop); 4193 4194 Label L_carry; 4195 subw(kdx, kdx, 1); 4196 cbzw(kdx, L_carry); 4197 4198 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 4199 lsr(carry, carry, 32); 4200 subw(kdx, kdx, 1); 4201 4202 bind(L_carry); 4203 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 4204 4205 // Second and third (nested) loops. 
4206 // 4207 // for (int i = xstart-1; i >= 0; i--) { // Second loop 4208 // carry = 0; 4209 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 4210 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 4211 // (z[k] & LONG_MASK) + carry; 4212 // z[k] = (int)product; 4213 // carry = product >>> 32; 4214 // } 4215 // z[i] = (int)carry; 4216 // } 4217 // 4218 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 4219 4220 const Register jdx = tmp1; 4221 4222 bind(L_second_loop); 4223 mov(carry, zr); // carry = 0; 4224 movw(jdx, ylen); // j = ystart+1 4225 4226 subsw(xstart, xstart, 1); // i = xstart-1; 4227 br(Assembler::MI, L_done); 4228 4229 str(z, Address(pre(sp, -4 * wordSize))); 4230 4231 Label L_last_x; 4232 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4233 subsw(xstart, xstart, 1); // i = xstart-1; 4234 br(Assembler::MI, L_last_x); 4235 4236 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4237 ldr(product_hi, Address(rscratch1)); 4238 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4239 4240 Label L_third_loop_prologue; 4241 bind(L_third_loop_prologue); 4242 4243 str(ylen, Address(sp, wordSize)); 4244 stp(x, xstart, Address(sp, 2 * wordSize)); 4245 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4246 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4247 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4248 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4249 4250 addw(tmp3, xlen, 1); 4251 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4252 subsw(tmp3, tmp3, 1); 4253 br(Assembler::MI, L_done); 4254 4255 lsr(carry, carry, 32); 4256 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4257 b(L_second_loop); 4258 4259 // Next infrequent code is moved outside loops. 4260 bind(L_last_x); 4261 ldrw(product_hi, Address(x, 0)); 4262 b(L_third_loop_prologue); 4263 4264 bind(L_done); 4265 } 4266 4267 // Code for BigInteger::mulAdd intrinsic 4268 // out = r0 4269 // in = r1 4270 // offset = r2 (already out.length-offset) 4271 // len = r3 4272 // k = r4 4273 // 4274 // pseudo code from java implementation: 4275 // carry = 0; 4276 // offset = out.length-offset - 1; 4277 // for (int j=len-1; j >= 0; j--) { 4278 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4279 // out[offset--] = (int)product; 4280 // carry = product >>> 32; 4281 // } 4282 // return (int)carry; 4283 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4284 Register len, Register k) { 4285 Label LOOP, END; 4286 // pre-loop 4287 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4288 csel(out, zr, out, Assembler::EQ); 4289 br(Assembler::EQ, END); 4290 add(in, in, len, LSL, 2); // in[j+1] address 4291 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4292 mov(out, zr); // used to keep carry now 4293 BIND(LOOP); 4294 ldrw(rscratch1, Address(pre(in, -4))); 4295 madd(rscratch1, rscratch1, k, out); 4296 ldrw(rscratch2, Address(pre(offset, -4))); 4297 add(rscratch1, rscratch1, rscratch2); 4298 strw(rscratch1, Address(offset)); 4299 lsr(out, rscratch1, 32); 4300 subs(len, len, 1); 4301 br(Assembler::NE, LOOP); 4302 BIND(END); 4303 } 4304 4305 /** 4306 * Emits code to update CRC-32 with a byte value according to constants in table 4307 * 4308 * @param [in,out]crc Register containing the crc. 
4309 * @param [in]val Register containing the byte to fold into the CRC. 4310 * @param [in]table Register containing the table of crc constants. 4311 * 4312 * uint32_t crc; 4313 * val = crc_table[(val ^ crc) & 0xFF]; 4314 * crc = val ^ (crc >> 8); 4315 * 4316 */ 4317 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4318 eor(val, val, crc); 4319 andr(val, val, 0xff); 4320 ldrw(val, Address(table, val, Address::lsl(2))); 4321 eor(crc, val, crc, Assembler::LSR, 8); 4322 } 4323 4324 /** 4325 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4326 * 4327 * @param [in,out]crc Register containing the crc. 4328 * @param [in]v Register containing the 32-bit to fold into the CRC. 4329 * @param [in]table0 Register containing table 0 of crc constants. 4330 * @param [in]table1 Register containing table 1 of crc constants. 4331 * @param [in]table2 Register containing table 2 of crc constants. 4332 * @param [in]table3 Register containing table 3 of crc constants. 4333 * 4334 * uint32_t crc; 4335 * v = crc ^ v 4336 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4337 * 4338 */ 4339 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4340 Register table0, Register table1, Register table2, Register table3, 4341 bool upper) { 4342 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4343 uxtb(tmp, v); 4344 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4345 ubfx(tmp, v, 8, 8); 4346 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4347 eor(crc, crc, tmp); 4348 ubfx(tmp, v, 16, 8); 4349 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4350 eor(crc, crc, tmp); 4351 ubfx(tmp, v, 24, 8); 4352 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4353 eor(crc, crc, tmp); 4354 } 4355 4356 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4357 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4358 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4359 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4360 4361 subs(tmp0, len, 384); 4362 mvnw(crc, crc); 4363 br(Assembler::GE, CRC_by128_pre); 4364 BIND(CRC_less128); 4365 subs(len, len, 32); 4366 br(Assembler::GE, CRC_by32_loop); 4367 BIND(CRC_less32); 4368 adds(len, len, 32 - 4); 4369 br(Assembler::GE, CRC_by4_loop); 4370 adds(len, len, 4); 4371 br(Assembler::GT, CRC_by1_loop); 4372 b(L_exit); 4373 4374 BIND(CRC_by32_loop); 4375 ldp(tmp0, tmp1, Address(buf)); 4376 crc32x(crc, crc, tmp0); 4377 ldp(tmp2, tmp3, Address(buf, 16)); 4378 crc32x(crc, crc, tmp1); 4379 add(buf, buf, 32); 4380 crc32x(crc, crc, tmp2); 4381 subs(len, len, 32); 4382 crc32x(crc, crc, tmp3); 4383 br(Assembler::GE, CRC_by32_loop); 4384 cmn(len, (u1)32); 4385 br(Assembler::NE, CRC_less32); 4386 b(L_exit); 4387 4388 BIND(CRC_by4_loop); 4389 ldrw(tmp0, Address(post(buf, 4))); 4390 subs(len, len, 4); 4391 crc32w(crc, crc, tmp0); 4392 br(Assembler::GE, CRC_by4_loop); 4393 adds(len, len, 4); 4394 br(Assembler::LE, L_exit); 4395 BIND(CRC_by1_loop); 4396 ldrb(tmp0, Address(post(buf, 1))); 4397 subs(len, len, 1); 4398 crc32b(crc, crc, tmp0); 4399 br(Assembler::GT, CRC_by1_loop); 4400 b(L_exit); 4401 4402 BIND(CRC_by128_pre); 4403 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4404 4*256*sizeof(juint) + 8*sizeof(juint)); 4405 mov(crc, 0); 4406 crc32x(crc, crc, tmp0); 4407 crc32x(crc, crc, tmp1); 4408 4409 cbnz(len, CRC_less128); 4410 4411 BIND(L_exit); 
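  // Undo the bitwise inversion of crc applied on entry.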
4412 mvnw(crc, crc); 4413 } 4414 4415 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4416 Register len, Register tmp0, Register tmp1, Register tmp2, 4417 Register tmp3) { 4418 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4419 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4420 4421 mvnw(crc, crc); 4422 4423 subs(len, len, 128); 4424 br(Assembler::GE, CRC_by64_pre); 4425 BIND(CRC_less64); 4426 adds(len, len, 128-32); 4427 br(Assembler::GE, CRC_by32_loop); 4428 BIND(CRC_less32); 4429 adds(len, len, 32-4); 4430 br(Assembler::GE, CRC_by4_loop); 4431 adds(len, len, 4); 4432 br(Assembler::GT, CRC_by1_loop); 4433 b(L_exit); 4434 4435 BIND(CRC_by32_loop); 4436 ldp(tmp0, tmp1, Address(post(buf, 16))); 4437 subs(len, len, 32); 4438 crc32x(crc, crc, tmp0); 4439 ldr(tmp2, Address(post(buf, 8))); 4440 crc32x(crc, crc, tmp1); 4441 ldr(tmp3, Address(post(buf, 8))); 4442 crc32x(crc, crc, tmp2); 4443 crc32x(crc, crc, tmp3); 4444 br(Assembler::GE, CRC_by32_loop); 4445 cmn(len, (u1)32); 4446 br(Assembler::NE, CRC_less32); 4447 b(L_exit); 4448 4449 BIND(CRC_by4_loop); 4450 ldrw(tmp0, Address(post(buf, 4))); 4451 subs(len, len, 4); 4452 crc32w(crc, crc, tmp0); 4453 br(Assembler::GE, CRC_by4_loop); 4454 adds(len, len, 4); 4455 br(Assembler::LE, L_exit); 4456 BIND(CRC_by1_loop); 4457 ldrb(tmp0, Address(post(buf, 1))); 4458 subs(len, len, 1); 4459 crc32b(crc, crc, tmp0); 4460 br(Assembler::GT, CRC_by1_loop); 4461 b(L_exit); 4462 4463 BIND(CRC_by64_pre); 4464 sub(buf, buf, 8); 4465 ldp(tmp0, tmp1, Address(buf, 8)); 4466 crc32x(crc, crc, tmp0); 4467 ldr(tmp2, Address(buf, 24)); 4468 crc32x(crc, crc, tmp1); 4469 ldr(tmp3, Address(buf, 32)); 4470 crc32x(crc, crc, tmp2); 4471 ldr(tmp0, Address(buf, 40)); 4472 crc32x(crc, crc, tmp3); 4473 ldr(tmp1, Address(buf, 48)); 4474 crc32x(crc, crc, tmp0); 4475 ldr(tmp2, Address(buf, 56)); 4476 crc32x(crc, crc, tmp1); 4477 ldr(tmp3, Address(pre(buf, 64))); 4478 4479 b(CRC_by64_loop); 4480 4481 align(CodeEntryAlignment); 4482 BIND(CRC_by64_loop); 4483 subs(len, len, 64); 4484 crc32x(crc, crc, tmp2); 4485 ldr(tmp0, Address(buf, 8)); 4486 crc32x(crc, crc, tmp3); 4487 ldr(tmp1, Address(buf, 16)); 4488 crc32x(crc, crc, tmp0); 4489 ldr(tmp2, Address(buf, 24)); 4490 crc32x(crc, crc, tmp1); 4491 ldr(tmp3, Address(buf, 32)); 4492 crc32x(crc, crc, tmp2); 4493 ldr(tmp0, Address(buf, 40)); 4494 crc32x(crc, crc, tmp3); 4495 ldr(tmp1, Address(buf, 48)); 4496 crc32x(crc, crc, tmp0); 4497 ldr(tmp2, Address(buf, 56)); 4498 crc32x(crc, crc, tmp1); 4499 ldr(tmp3, Address(pre(buf, 64))); 4500 br(Assembler::GE, CRC_by64_loop); 4501 4502 // post-loop 4503 crc32x(crc, crc, tmp2); 4504 crc32x(crc, crc, tmp3); 4505 4506 sub(len, len, 64); 4507 add(buf, buf, 8); 4508 cmn(len, (u1)128); 4509 br(Assembler::NE, CRC_less64); 4510 BIND(L_exit); 4511 mvnw(crc, crc); 4512 } 4513 4514 /** 4515 * @param crc register containing existing CRC (32-bit) 4516 * @param buf register pointing to input byte buffer (byte*) 4517 * @param len register containing number of bytes 4518 * @param table register that will contain address of CRC table 4519 * @param tmp scratch register 4520 */ 4521 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4522 Register table0, Register table1, Register table2, Register table3, 4523 Register tmp, Register tmp2, Register tmp3) { 4524 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4525 4526 if (UseCryptoPmullForCRC32) { 4527 
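    // Preferred path when crypto PMULL is enabled: fold 128-byte blocks
    // with carry-less multiplies, finishing the tail with the CRC32
    // instructions.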
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4528 return; 4529 } 4530 4531 if (UseCRC32) { 4532 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4533 return; 4534 } 4535 4536 mvnw(crc, crc); 4537 4538 { 4539 uint64_t offset; 4540 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4541 add(table0, table0, offset); 4542 } 4543 add(table1, table0, 1*256*sizeof(juint)); 4544 add(table2, table0, 2*256*sizeof(juint)); 4545 add(table3, table0, 3*256*sizeof(juint)); 4546 4547 { // Neon code start 4548 cmp(len, (u1)64); 4549 br(Assembler::LT, L_by16); 4550 eor(v16, T16B, v16, v16); 4551 4552 Label L_fold; 4553 4554 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4555 4556 ld1(v0, v1, T2D, post(buf, 32)); 4557 ld1r(v4, T2D, post(tmp, 8)); 4558 ld1r(v5, T2D, post(tmp, 8)); 4559 ld1r(v6, T2D, post(tmp, 8)); 4560 ld1r(v7, T2D, post(tmp, 8)); 4561 mov(v16, S, 0, crc); 4562 4563 eor(v0, T16B, v0, v16); 4564 sub(len, len, 64); 4565 4566 BIND(L_fold); 4567 pmull(v22, T8H, v0, v5, T8B); 4568 pmull(v20, T8H, v0, v7, T8B); 4569 pmull(v23, T8H, v0, v4, T8B); 4570 pmull(v21, T8H, v0, v6, T8B); 4571 4572 pmull2(v18, T8H, v0, v5, T16B); 4573 pmull2(v16, T8H, v0, v7, T16B); 4574 pmull2(v19, T8H, v0, v4, T16B); 4575 pmull2(v17, T8H, v0, v6, T16B); 4576 4577 uzp1(v24, T8H, v20, v22); 4578 uzp2(v25, T8H, v20, v22); 4579 eor(v20, T16B, v24, v25); 4580 4581 uzp1(v26, T8H, v16, v18); 4582 uzp2(v27, T8H, v16, v18); 4583 eor(v16, T16B, v26, v27); 4584 4585 ushll2(v22, T4S, v20, T8H, 8); 4586 ushll(v20, T4S, v20, T4H, 8); 4587 4588 ushll2(v18, T4S, v16, T8H, 8); 4589 ushll(v16, T4S, v16, T4H, 8); 4590 4591 eor(v22, T16B, v23, v22); 4592 eor(v18, T16B, v19, v18); 4593 eor(v20, T16B, v21, v20); 4594 eor(v16, T16B, v17, v16); 4595 4596 uzp1(v17, T2D, v16, v20); 4597 uzp2(v21, T2D, v16, v20); 4598 eor(v17, T16B, v17, v21); 4599 4600 ushll2(v20, T2D, v17, T4S, 16); 4601 ushll(v16, T2D, v17, T2S, 16); 4602 4603 eor(v20, T16B, v20, v22); 4604 eor(v16, T16B, v16, v18); 4605 4606 uzp1(v17, T2D, v20, v16); 4607 uzp2(v21, T2D, v20, v16); 4608 eor(v28, T16B, v17, v21); 4609 4610 pmull(v22, T8H, v1, v5, T8B); 4611 pmull(v20, T8H, v1, v7, T8B); 4612 pmull(v23, T8H, v1, v4, T8B); 4613 pmull(v21, T8H, v1, v6, T8B); 4614 4615 pmull2(v18, T8H, v1, v5, T16B); 4616 pmull2(v16, T8H, v1, v7, T16B); 4617 pmull2(v19, T8H, v1, v4, T16B); 4618 pmull2(v17, T8H, v1, v6, T16B); 4619 4620 ld1(v0, v1, T2D, post(buf, 32)); 4621 4622 uzp1(v24, T8H, v20, v22); 4623 uzp2(v25, T8H, v20, v22); 4624 eor(v20, T16B, v24, v25); 4625 4626 uzp1(v26, T8H, v16, v18); 4627 uzp2(v27, T8H, v16, v18); 4628 eor(v16, T16B, v26, v27); 4629 4630 ushll2(v22, T4S, v20, T8H, 8); 4631 ushll(v20, T4S, v20, T4H, 8); 4632 4633 ushll2(v18, T4S, v16, T8H, 8); 4634 ushll(v16, T4S, v16, T4H, 8); 4635 4636 eor(v22, T16B, v23, v22); 4637 eor(v18, T16B, v19, v18); 4638 eor(v20, T16B, v21, v20); 4639 eor(v16, T16B, v17, v16); 4640 4641 uzp1(v17, T2D, v16, v20); 4642 uzp2(v21, T2D, v16, v20); 4643 eor(v16, T16B, v17, v21); 4644 4645 ushll2(v20, T2D, v16, T4S, 16); 4646 ushll(v16, T2D, v16, T2S, 16); 4647 4648 eor(v20, T16B, v22, v20); 4649 eor(v16, T16B, v16, v18); 4650 4651 uzp1(v17, T2D, v20, v16); 4652 uzp2(v21, T2D, v20, v16); 4653 eor(v20, T16B, v17, v21); 4654 4655 shl(v16, T2D, v28, 1); 4656 shl(v17, T2D, v20, 1); 4657 4658 eor(v0, T16B, v0, v16); 4659 eor(v1, T16B, v1, v17); 4660 4661 subs(len, len, 32); 4662 br(Assembler::GE, L_fold); 4663 4664 mov(crc, 0); 4665 mov(tmp, v0, D, 0); 
4666 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4667 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4668 mov(tmp, v0, D, 1); 4669 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4670 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4671 mov(tmp, v1, D, 0); 4672 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4673 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4674 mov(tmp, v1, D, 1); 4675 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4676 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4677 4678 add(len, len, 32); 4679 } // Neon code end 4680 4681 BIND(L_by16); 4682 subs(len, len, 16); 4683 br(Assembler::GE, L_by16_loop); 4684 adds(len, len, 16-4); 4685 br(Assembler::GE, L_by4_loop); 4686 adds(len, len, 4); 4687 br(Assembler::GT, L_by1_loop); 4688 b(L_exit); 4689 4690 BIND(L_by4_loop); 4691 ldrw(tmp, Address(post(buf, 4))); 4692 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4693 subs(len, len, 4); 4694 br(Assembler::GE, L_by4_loop); 4695 adds(len, len, 4); 4696 br(Assembler::LE, L_exit); 4697 BIND(L_by1_loop); 4698 subs(len, len, 1); 4699 ldrb(tmp, Address(post(buf, 1))); 4700 update_byte_crc32(crc, tmp, table0); 4701 br(Assembler::GT, L_by1_loop); 4702 b(L_exit); 4703 4704 align(CodeEntryAlignment); 4705 BIND(L_by16_loop); 4706 subs(len, len, 16); 4707 ldp(tmp, tmp3, Address(post(buf, 16))); 4708 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4709 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4710 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4711 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4712 br(Assembler::GE, L_by16_loop); 4713 adds(len, len, 16-4); 4714 br(Assembler::GE, L_by4_loop); 4715 adds(len, len, 4); 4716 br(Assembler::GT, L_by1_loop); 4717 BIND(L_exit); 4718 mvnw(crc, crc); 4719 } 4720 4721 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4722 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4723 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4724 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4725 4726 subs(tmp0, len, 384); 4727 br(Assembler::GE, CRC_by128_pre); 4728 BIND(CRC_less128); 4729 subs(len, len, 32); 4730 br(Assembler::GE, CRC_by32_loop); 4731 BIND(CRC_less32); 4732 adds(len, len, 32 - 4); 4733 br(Assembler::GE, CRC_by4_loop); 4734 adds(len, len, 4); 4735 br(Assembler::GT, CRC_by1_loop); 4736 b(L_exit); 4737 4738 BIND(CRC_by32_loop); 4739 ldp(tmp0, tmp1, Address(buf)); 4740 crc32cx(crc, crc, tmp0); 4741 ldr(tmp2, Address(buf, 16)); 4742 crc32cx(crc, crc, tmp1); 4743 ldr(tmp3, Address(buf, 24)); 4744 crc32cx(crc, crc, tmp2); 4745 add(buf, buf, 32); 4746 subs(len, len, 32); 4747 crc32cx(crc, crc, tmp3); 4748 br(Assembler::GE, CRC_by32_loop); 4749 cmn(len, (u1)32); 4750 br(Assembler::NE, CRC_less32); 4751 b(L_exit); 4752 4753 BIND(CRC_by4_loop); 4754 ldrw(tmp0, Address(post(buf, 4))); 4755 subs(len, len, 4); 4756 crc32cw(crc, crc, tmp0); 4757 br(Assembler::GE, CRC_by4_loop); 4758 adds(len, len, 4); 4759 br(Assembler::LE, L_exit); 4760 BIND(CRC_by1_loop); 4761 ldrb(tmp0, Address(post(buf, 1))); 4762 subs(len, len, 1); 4763 crc32cb(crc, crc, tmp0); 4764 br(Assembler::GT, CRC_by1_loop); 4765 b(L_exit); 4766 
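  // Bulk path: fold 128-byte blocks with the shared PMULL routine, fold
  // the resulting 128 bits into the running CRC with crc32cx, then loop
  // back above to handle any remaining tail.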
4767 BIND(CRC_by128_pre); 4768 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4769 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4770 mov(crc, 0); 4771 crc32cx(crc, crc, tmp0); 4772 crc32cx(crc, crc, tmp1); 4773 4774 cbnz(len, CRC_less128); 4775 4776 BIND(L_exit); 4777 } 4778 4779 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4780 Register len, Register tmp0, Register tmp1, Register tmp2, 4781 Register tmp3) { 4782 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4783 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4784 4785 subs(len, len, 128); 4786 br(Assembler::GE, CRC_by64_pre); 4787 BIND(CRC_less64); 4788 adds(len, len, 128-32); 4789 br(Assembler::GE, CRC_by32_loop); 4790 BIND(CRC_less32); 4791 adds(len, len, 32-4); 4792 br(Assembler::GE, CRC_by4_loop); 4793 adds(len, len, 4); 4794 br(Assembler::GT, CRC_by1_loop); 4795 b(L_exit); 4796 4797 BIND(CRC_by32_loop); 4798 ldp(tmp0, tmp1, Address(post(buf, 16))); 4799 subs(len, len, 32); 4800 crc32cx(crc, crc, tmp0); 4801 ldr(tmp2, Address(post(buf, 8))); 4802 crc32cx(crc, crc, tmp1); 4803 ldr(tmp3, Address(post(buf, 8))); 4804 crc32cx(crc, crc, tmp2); 4805 crc32cx(crc, crc, tmp3); 4806 br(Assembler::GE, CRC_by32_loop); 4807 cmn(len, (u1)32); 4808 br(Assembler::NE, CRC_less32); 4809 b(L_exit); 4810 4811 BIND(CRC_by4_loop); 4812 ldrw(tmp0, Address(post(buf, 4))); 4813 subs(len, len, 4); 4814 crc32cw(crc, crc, tmp0); 4815 br(Assembler::GE, CRC_by4_loop); 4816 adds(len, len, 4); 4817 br(Assembler::LE, L_exit); 4818 BIND(CRC_by1_loop); 4819 ldrb(tmp0, Address(post(buf, 1))); 4820 subs(len, len, 1); 4821 crc32cb(crc, crc, tmp0); 4822 br(Assembler::GT, CRC_by1_loop); 4823 b(L_exit); 4824 4825 BIND(CRC_by64_pre); 4826 sub(buf, buf, 8); 4827 ldp(tmp0, tmp1, Address(buf, 8)); 4828 crc32cx(crc, crc, tmp0); 4829 ldr(tmp2, Address(buf, 24)); 4830 crc32cx(crc, crc, tmp1); 4831 ldr(tmp3, Address(buf, 32)); 4832 crc32cx(crc, crc, tmp2); 4833 ldr(tmp0, Address(buf, 40)); 4834 crc32cx(crc, crc, tmp3); 4835 ldr(tmp1, Address(buf, 48)); 4836 crc32cx(crc, crc, tmp0); 4837 ldr(tmp2, Address(buf, 56)); 4838 crc32cx(crc, crc, tmp1); 4839 ldr(tmp3, Address(pre(buf, 64))); 4840 4841 b(CRC_by64_loop); 4842 4843 align(CodeEntryAlignment); 4844 BIND(CRC_by64_loop); 4845 subs(len, len, 64); 4846 crc32cx(crc, crc, tmp2); 4847 ldr(tmp0, Address(buf, 8)); 4848 crc32cx(crc, crc, tmp3); 4849 ldr(tmp1, Address(buf, 16)); 4850 crc32cx(crc, crc, tmp0); 4851 ldr(tmp2, Address(buf, 24)); 4852 crc32cx(crc, crc, tmp1); 4853 ldr(tmp3, Address(buf, 32)); 4854 crc32cx(crc, crc, tmp2); 4855 ldr(tmp0, Address(buf, 40)); 4856 crc32cx(crc, crc, tmp3); 4857 ldr(tmp1, Address(buf, 48)); 4858 crc32cx(crc, crc, tmp0); 4859 ldr(tmp2, Address(buf, 56)); 4860 crc32cx(crc, crc, tmp1); 4861 ldr(tmp3, Address(pre(buf, 64))); 4862 br(Assembler::GE, CRC_by64_loop); 4863 4864 // post-loop 4865 crc32cx(crc, crc, tmp2); 4866 crc32cx(crc, crc, tmp3); 4867 4868 sub(len, len, 64); 4869 add(buf, buf, 8); 4870 cmn(len, (u1)128); 4871 br(Assembler::NE, CRC_less64); 4872 BIND(L_exit); 4873 } 4874 4875 /** 4876 * @param crc register containing existing CRC (32-bit) 4877 * @param buf register pointing to input byte buffer (byte*) 4878 * @param len register containing number of bytes 4879 * @param table register that will contain address of CRC table 4880 * @param tmp scratch register 4881 */ 4882 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4883 Register table0, Register table1, Register table2, Register table3, 4884 Register tmp, Register tmp2, Register tmp3) { 4885 if (UseCryptoPmullForCRC32) { 4886 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4887 } else { 4888 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4889 } 4890 } 4891 4892 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4893 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4894 Label CRC_by128_loop; 4895 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4896 4897 sub(len, len, 256); 4898 Register table = tmp0; 4899 { 4900 uint64_t offset; 4901 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4902 add(table, table, offset); 4903 } 4904 add(table, table, table_offset); 4905 4906 // Registers v0..v7 are used as data registers. 4907 // Registers v16..v31 are used as tmp registers. 4908 sub(buf, buf, 0x10); 4909 ldrq(v0, Address(buf, 0x10)); 4910 ldrq(v1, Address(buf, 0x20)); 4911 ldrq(v2, Address(buf, 0x30)); 4912 ldrq(v3, Address(buf, 0x40)); 4913 ldrq(v4, Address(buf, 0x50)); 4914 ldrq(v5, Address(buf, 0x60)); 4915 ldrq(v6, Address(buf, 0x70)); 4916 ldrq(v7, Address(pre(buf, 0x80))); 4917 4918 movi(v31, T4S, 0); 4919 mov(v31, S, 0, crc); 4920 eor(v0, T16B, v0, v31); 4921 4922 // Register v16 contains constants from the crc table. 4923 ldrq(v16, Address(table)); 4924 b(CRC_by128_loop); 4925 4926 align(OptoLoopAlignment); 4927 BIND(CRC_by128_loop); 4928 pmull (v17, T1Q, v0, v16, T1D); 4929 pmull2(v18, T1Q, v0, v16, T2D); 4930 ldrq(v0, Address(buf, 0x10)); 4931 eor3(v0, T16B, v17, v18, v0); 4932 4933 pmull (v19, T1Q, v1, v16, T1D); 4934 pmull2(v20, T1Q, v1, v16, T2D); 4935 ldrq(v1, Address(buf, 0x20)); 4936 eor3(v1, T16B, v19, v20, v1); 4937 4938 pmull (v21, T1Q, v2, v16, T1D); 4939 pmull2(v22, T1Q, v2, v16, T2D); 4940 ldrq(v2, Address(buf, 0x30)); 4941 eor3(v2, T16B, v21, v22, v2); 4942 4943 pmull (v23, T1Q, v3, v16, T1D); 4944 pmull2(v24, T1Q, v3, v16, T2D); 4945 ldrq(v3, Address(buf, 0x40)); 4946 eor3(v3, T16B, v23, v24, v3); 4947 4948 pmull (v25, T1Q, v4, v16, T1D); 4949 pmull2(v26, T1Q, v4, v16, T2D); 4950 ldrq(v4, Address(buf, 0x50)); 4951 eor3(v4, T16B, v25, v26, v4); 4952 4953 pmull (v27, T1Q, v5, v16, T1D); 4954 pmull2(v28, T1Q, v5, v16, T2D); 4955 ldrq(v5, Address(buf, 0x60)); 4956 eor3(v5, T16B, v27, v28, v5); 4957 4958 pmull (v29, T1Q, v6, v16, T1D); 4959 pmull2(v30, T1Q, v6, v16, T2D); 4960 ldrq(v6, Address(buf, 0x70)); 4961 eor3(v6, T16B, v29, v30, v6); 4962 4963 // Reuse registers v23, v24. 4964 // Using them won't block the first instruction of the next iteration. 4965 pmull (v23, T1Q, v7, v16, T1D); 4966 pmull2(v24, T1Q, v7, v16, T2D); 4967 ldrq(v7, Address(pre(buf, 0x80))); 4968 eor3(v7, T16B, v23, v24, v7); 4969 4970 subs(len, len, 0x80); 4971 br(Assembler::GE, CRC_by128_loop); 4972 4973 // fold into 512 bits 4974 // Use v31 for constants because v16 can be still in use. 
4975 ldrq(v31, Address(table, 0x10)); 4976 4977 pmull (v17, T1Q, v0, v31, T1D); 4978 pmull2(v18, T1Q, v0, v31, T2D); 4979 eor3(v0, T16B, v17, v18, v4); 4980 4981 pmull (v19, T1Q, v1, v31, T1D); 4982 pmull2(v20, T1Q, v1, v31, T2D); 4983 eor3(v1, T16B, v19, v20, v5); 4984 4985 pmull (v21, T1Q, v2, v31, T1D); 4986 pmull2(v22, T1Q, v2, v31, T2D); 4987 eor3(v2, T16B, v21, v22, v6); 4988 4989 pmull (v23, T1Q, v3, v31, T1D); 4990 pmull2(v24, T1Q, v3, v31, T2D); 4991 eor3(v3, T16B, v23, v24, v7); 4992 4993 // fold into 128 bits 4994 // Use v17 for constants because v31 can be still in use. 4995 ldrq(v17, Address(table, 0x20)); 4996 pmull (v25, T1Q, v0, v17, T1D); 4997 pmull2(v26, T1Q, v0, v17, T2D); 4998 eor3(v3, T16B, v3, v25, v26); 4999 5000 // Use v18 for constants because v17 can be still in use. 5001 ldrq(v18, Address(table, 0x30)); 5002 pmull (v27, T1Q, v1, v18, T1D); 5003 pmull2(v28, T1Q, v1, v18, T2D); 5004 eor3(v3, T16B, v3, v27, v28); 5005 5006 // Use v19 for constants because v18 can be still in use. 5007 ldrq(v19, Address(table, 0x40)); 5008 pmull (v29, T1Q, v2, v19, T1D); 5009 pmull2(v30, T1Q, v2, v19, T2D); 5010 eor3(v0, T16B, v3, v29, v30); 5011 5012 add(len, len, 0x80); 5013 add(buf, buf, 0x10); 5014 5015 mov(tmp0, v0, D, 0); 5016 mov(tmp1, v0, D, 1); 5017 } 5018 5019 void MacroAssembler::addptr(const Address &dst, int32_t src) { 5020 Address adr; 5021 switch(dst.getMode()) { 5022 case Address::base_plus_offset: 5023 // This is the expected mode, although we allow all the other 5024 // forms below. 5025 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 5026 break; 5027 default: 5028 lea(rscratch2, dst); 5029 adr = Address(rscratch2); 5030 break; 5031 } 5032 ldr(rscratch1, adr); 5033 add(rscratch1, rscratch1, src); 5034 str(rscratch1, adr); 5035 } 5036 5037 void MacroAssembler::cmpptr(Register src1, Address src2) { 5038 uint64_t offset; 5039 adrp(rscratch1, src2, offset); 5040 ldr(rscratch1, Address(rscratch1, offset)); 5041 cmp(src1, rscratch1); 5042 } 5043 5044 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 5045 cmp(obj1, obj2); 5046 } 5047 5048 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5049 load_method_holder(rresult, rmethod); 5050 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5051 } 5052 5053 void MacroAssembler::load_method_holder(Register holder, Register method) { 5054 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 5055 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5056 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5057 } 5058 5059 // Loads the obj's Klass* into dst. 5060 // Preserves all registers (incl src, rscratch1 and rscratch2). 5061 // Input: 5062 // src - the oop we want to load the klass from. 5063 // dst - output narrow klass. 
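// With compact object headers the narrow Klass* lives in the upper bits of
// the mark word, so it is recovered by loading the mark word and shifting
// right by markWord::klass_shift.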
5064 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { 5065 assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); 5066 ldr(dst, Address(src, oopDesc::mark_offset_in_bytes())); 5067 lsr(dst, dst, markWord::klass_shift); 5068 } 5069 5070 void MacroAssembler::load_klass(Register dst, Register src) { 5071 if (UseCompactObjectHeaders) { 5072 load_narrow_klass_compact(dst, src); 5073 decode_klass_not_null(dst); 5074 } else if (UseCompressedClassPointers) { 5075 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5076 decode_klass_not_null(dst); 5077 } else { 5078 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5079 } 5080 } 5081 5082 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 5083 if (RestoreMXCSROnJNICalls) { 5084 Label OK; 5085 get_fpcr(tmp1); 5086 mov(tmp2, tmp1); 5087 // Set FPCR to the state we need. We do want Round to Nearest. We 5088 // don't want non-IEEE rounding modes or floating-point traps. 5089 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 5090 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 5091 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 5092 eor(tmp2, tmp1, tmp2); 5093 cbz(tmp2, OK); // Only reset FPCR if it's wrong 5094 set_fpcr(tmp1); 5095 bind(OK); 5096 } 5097 } 5098 5099 // ((OopHandle)result).resolve(); 5100 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 5101 // OopHandle::resolve is an indirection. 5102 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 5103 } 5104 5105 // ((WeakHandle)result).resolve(); 5106 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 5107 assert_different_registers(result, tmp1, tmp2); 5108 Label resolved; 5109 5110 // A null weak handle resolves to null. 5111 cbz(result, resolved); 5112 5113 // Only 64 bit platforms support GCs that require a tmp register 5114 // WeakHandle::resolve is an indirection like jweak. 
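  // ON_PHANTOM_OOP_REF gives this load the weakest reference strength, the
  // same as a jweak: if the collector has already cleared the referent, the
  // load yields null rather than keeping the object alive.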
5115 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5116 result, Address(result), tmp1, tmp2); 5117 bind(resolved); 5118 } 5119 5120 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) { 5121 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5122 ldr(dst, Address(rmethod, Method::const_offset())); 5123 ldr(dst, Address(dst, ConstMethod::constants_offset())); 5124 ldr(dst, Address(dst, ConstantPool::pool_holder_offset())); 5125 ldr(dst, Address(dst, mirror_offset)); 5126 resolve_oop_handle(dst, tmp1, tmp2); 5127 } 5128 5129 void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) { 5130 assert_different_registers(obj, klass, tmp); 5131 if (UseCompressedClassPointers) { 5132 if (UseCompactObjectHeaders) { 5133 load_narrow_klass_compact(tmp, obj); 5134 } else { 5135 ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); 5136 } 5137 if (CompressedKlassPointers::base() == nullptr) { 5138 cmp(klass, tmp, LSL, CompressedKlassPointers::shift()); 5139 return; 5140 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 5141 && CompressedKlassPointers::shift() == 0) { 5142 // Only the bottom 32 bits matter 5143 cmpw(klass, tmp); 5144 return; 5145 } 5146 decode_klass_not_null(tmp); 5147 } else { 5148 ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); 5149 } 5150 cmp(klass, tmp); 5151 } 5152 5153 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { 5154 if (UseCompactObjectHeaders) { 5155 load_narrow_klass_compact(tmp1, obj1); 5156 load_narrow_klass_compact(tmp2, obj2); 5157 cmpw(tmp1, tmp2); 5158 } else if (UseCompressedClassPointers) { 5159 ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5160 ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes())); 5161 cmpw(tmp1, tmp2); 5162 } else { 5163 ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5164 ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes())); 5165 cmp(tmp1, tmp2); 5166 } 5167 } 5168 5169 void MacroAssembler::store_klass(Register dst, Register src) { 5170 // FIXME: Should this be a store release? concurrent gcs assumes 5171 // klass length is valid if klass field is not null. 5172 assert(!UseCompactObjectHeaders, "not with compact headers"); 5173 if (UseCompressedClassPointers) { 5174 encode_klass_not_null(src); 5175 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 5176 } else { 5177 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 5178 } 5179 } 5180 5181 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5182 assert(!UseCompactObjectHeaders, "not with compact headers"); 5183 if (UseCompressedClassPointers) { 5184 // Store to klass gap in destination 5185 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 5186 } 5187 } 5188 5189 // Algorithm must match CompressedOops::encode. 
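// Rough C-style sketch of the encoding implemented below (illustrative only;
// the authoritative definition is CompressedOops::encode). base is the
// compressed-oop base and shift is LogMinObjAlignmentInBytes:
//
//   narrowOop encode(oop o) {
//     if (base == nullptr) {
//       return o >> shift;                        // zero-based heap
//     } else {
//       return o == nullptr ? 0 : (o - base) >> shift;
//     }
//   }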
5190 void MacroAssembler::encode_heap_oop(Register d, Register s) { 5191 #ifdef ASSERT 5192 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5193 #endif 5194 verify_oop_msg(s, "broken oop in encode_heap_oop"); 5195 if (CompressedOops::base() == nullptr) { 5196 if (CompressedOops::shift() != 0) { 5197 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5198 lsr(d, s, LogMinObjAlignmentInBytes); 5199 } else { 5200 mov(d, s); 5201 } 5202 } else { 5203 subs(d, s, rheapbase); 5204 csel(d, d, zr, Assembler::HS); 5205 lsr(d, d, LogMinObjAlignmentInBytes); 5206 5207 /* Old algorithm: is this any worse? 5208 Label nonnull; 5209 cbnz(r, nonnull); 5210 sub(r, r, rheapbase); 5211 bind(nonnull); 5212 lsr(r, r, LogMinObjAlignmentInBytes); 5213 */ 5214 } 5215 } 5216 5217 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5218 #ifdef ASSERT 5219 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5220 if (CheckCompressedOops) { 5221 Label ok; 5222 cbnz(r, ok); 5223 stop("null oop passed to encode_heap_oop_not_null"); 5224 bind(ok); 5225 } 5226 #endif 5227 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5228 if (CompressedOops::base() != nullptr) { 5229 sub(r, r, rheapbase); 5230 } 5231 if (CompressedOops::shift() != 0) { 5232 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5233 lsr(r, r, LogMinObjAlignmentInBytes); 5234 } 5235 } 5236 5237 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5238 #ifdef ASSERT 5239 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5240 if (CheckCompressedOops) { 5241 Label ok; 5242 cbnz(src, ok); 5243 stop("null oop passed to encode_heap_oop_not_null2"); 5244 bind(ok); 5245 } 5246 #endif 5247 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5248 5249 Register data = src; 5250 if (CompressedOops::base() != nullptr) { 5251 sub(dst, src, rheapbase); 5252 data = dst; 5253 } 5254 if (CompressedOops::shift() != 0) { 5255 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5256 lsr(dst, data, LogMinObjAlignmentInBytes); 5257 data = dst; 5258 } 5259 if (data == src) 5260 mov(dst, src); 5261 } 5262 5263 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5264 #ifdef ASSERT 5265 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5266 #endif 5267 if (CompressedOops::base() == nullptr) { 5268 if (CompressedOops::shift() != 0) { 5269 lsl(d, s, CompressedOops::shift()); 5270 } else if (d != s) { 5271 mov(d, s); 5272 } 5273 } else { 5274 Label done; 5275 if (d != s) 5276 mov(d, s); 5277 cbz(s, done); 5278 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5279 bind(done); 5280 } 5281 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5282 } 5283 5284 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5285 assert (UseCompressedOops, "should only be used for compressed headers"); 5286 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5287 // Cannot assert, unverified entry point counts instructions (see .ad file) 5288 // vtableStubs also counts instructions in pd_code_size_limit. 5289 // Also do not verify_oop as this is called by verify_oop. 
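  // The value is known to be non-null, so no null check is needed; the
  // decode is simply oop = (base == nullptr ? 0 : base) + (narrow << shift).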
5290 if (CompressedOops::shift() != 0) { 5291 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5292 if (CompressedOops::base() != nullptr) { 5293 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5294 } else { 5295 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5296 } 5297 } else { 5298 assert (CompressedOops::base() == nullptr, "sanity"); 5299 } 5300 } 5301 5302 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5303 assert (UseCompressedOops, "should only be used for compressed headers"); 5304 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5305 // Cannot assert, unverified entry point counts instructions (see .ad file) 5306 // vtableStubs also counts instructions in pd_code_size_limit. 5307 // Also do not verify_oop as this is called by verify_oop. 5308 if (CompressedOops::shift() != 0) { 5309 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5310 if (CompressedOops::base() != nullptr) { 5311 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5312 } else { 5313 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5314 } 5315 } else { 5316 assert (CompressedOops::base() == nullptr, "sanity"); 5317 if (dst != src) { 5318 mov(dst, src); 5319 } 5320 } 5321 } 5322 5323 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5324 5325 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5326 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5327 assert(_klass_decode_mode != KlassDecodeNone, "should be initialized"); 5328 return _klass_decode_mode; 5329 } 5330 5331 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) { 5332 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5333 5334 // KlassDecodeMode shouldn't be set already. 5335 assert(_klass_decode_mode == KlassDecodeNone, "set once"); 5336 5337 if (base == nullptr) { 5338 return KlassDecodeZero; 5339 } 5340 5341 if (operand_valid_for_logical_immediate( 5342 /*is32*/false, (uint64_t)base)) { 5343 const uint64_t range_mask = right_n_bits(log2i_ceil(range)); 5344 if (((uint64_t)base & range_mask) == 0) { 5345 return KlassDecodeXor; 5346 } 5347 } 5348 5349 const uint64_t shifted_base = 5350 (uint64_t)base >> shift; 5351 if ((shifted_base & 0xffff0000ffffffff) == 0) { 5352 return KlassDecodeMovk; 5353 } 5354 5355 // No valid encoding. 5356 return KlassDecodeNone; 5357 } 5358 5359 // Check if one of the above decoding modes will work for given base, shift and range. 
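// For reference, the modes chosen by klass_decode_mode() above are:
//   KlassDecodeZero - base == nullptr; encode/decode is just a shift.
//   KlassDecodeXor  - base is a valid logical immediate whose low bits
//                     (those covered by the encoding range) are all zero,
//                     so it can be merged in and out with a single eor.
//   KlassDecodeMovk - (base >> shift) only has bits set in [32, 48), so the
//                     base can be inserted with a single movk #imm16, lsl #32.
//   KlassDecodeNone - none of the above applies; no encoding is available.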
5360 bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) { 5361 return klass_decode_mode(base, shift, range) != KlassDecodeNone; 5362 } 5363 5364 bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) { 5365 _klass_decode_mode = klass_decode_mode(base, shift, range); 5366 return _klass_decode_mode != KlassDecodeNone; 5367 } 5368 5369 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5370 switch (klass_decode_mode()) { 5371 case KlassDecodeZero: 5372 if (CompressedKlassPointers::shift() != 0) { 5373 lsr(dst, src, CompressedKlassPointers::shift()); 5374 } else { 5375 if (dst != src) mov(dst, src); 5376 } 5377 break; 5378 5379 case KlassDecodeXor: 5380 if (CompressedKlassPointers::shift() != 0) { 5381 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5382 lsr(dst, dst, CompressedKlassPointers::shift()); 5383 } else { 5384 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5385 } 5386 break; 5387 5388 case KlassDecodeMovk: 5389 if (CompressedKlassPointers::shift() != 0) { 5390 ubfx(dst, src, CompressedKlassPointers::shift(), 32); 5391 } else { 5392 movw(dst, src); 5393 } 5394 break; 5395 5396 case KlassDecodeNone: 5397 ShouldNotReachHere(); 5398 break; 5399 } 5400 } 5401 5402 void MacroAssembler::encode_klass_not_null(Register r) { 5403 encode_klass_not_null(r, r); 5404 } 5405 5406 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5407 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5408 5409 switch (klass_decode_mode()) { 5410 case KlassDecodeZero: 5411 if (CompressedKlassPointers::shift() != 0) { 5412 lsl(dst, src, CompressedKlassPointers::shift()); 5413 } else { 5414 if (dst != src) mov(dst, src); 5415 } 5416 break; 5417 5418 case KlassDecodeXor: 5419 if (CompressedKlassPointers::shift() != 0) { 5420 lsl(dst, src, CompressedKlassPointers::shift()); 5421 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5422 } else { 5423 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5424 } 5425 break; 5426 5427 case KlassDecodeMovk: { 5428 const uint64_t shifted_base = 5429 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5430 5431 if (dst != src) movw(dst, src); 5432 movk(dst, shifted_base >> 32, 32); 5433 5434 if (CompressedKlassPointers::shift() != 0) { 5435 lsl(dst, dst, CompressedKlassPointers::shift()); 5436 } 5437 5438 break; 5439 } 5440 5441 case KlassDecodeNone: 5442 ShouldNotReachHere(); 5443 break; 5444 } 5445 } 5446 5447 void MacroAssembler::decode_klass_not_null(Register r) { 5448 decode_klass_not_null(r, r); 5449 } 5450 5451 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5452 #ifdef ASSERT 5453 { 5454 ThreadInVMfromUnknown tiv; 5455 assert (UseCompressedOops, "should only be used for compressed oops"); 5456 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5457 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5458 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5459 } 5460 #endif 5461 int oop_index = oop_recorder()->find_index(obj); 5462 InstructionMark im(this); 5463 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5464 code_section()->relocate(inst_mark(), rspec); 5465 movz(dst, 0xDEAD, 16); 5466 movk(dst, 0xBEEF); 5467 } 5468 5469 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5470 assert (UseCompressedClassPointers, "should only be used for compressed 
headers"); 5471 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5472 int index = oop_recorder()->find_index(k); 5473 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5474 5475 InstructionMark im(this); 5476 RelocationHolder rspec = metadata_Relocation::spec(index); 5477 code_section()->relocate(inst_mark(), rspec); 5478 narrowKlass nk = CompressedKlassPointers::encode(k); 5479 movz(dst, (nk >> 16), 16); 5480 movk(dst, nk & 0xffff); 5481 } 5482 5483 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5484 Register dst, Address src, 5485 Register tmp1, Register tmp2) { 5486 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5487 decorators = AccessInternal::decorator_fixup(decorators, type); 5488 bool as_raw = (decorators & AS_RAW) != 0; 5489 if (as_raw) { 5490 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5491 } else { 5492 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5493 } 5494 } 5495 5496 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5497 Address dst, Register val, 5498 Register tmp1, Register tmp2, Register tmp3) { 5499 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5500 decorators = AccessInternal::decorator_fixup(decorators, type); 5501 bool as_raw = (decorators & AS_RAW) != 0; 5502 if (as_raw) { 5503 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5504 } else { 5505 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5506 } 5507 } 5508 5509 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5510 Register tmp2, DecoratorSet decorators) { 5511 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5512 } 5513 5514 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5515 Register tmp2, DecoratorSet decorators) { 5516 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5517 } 5518 5519 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5520 Register tmp2, Register tmp3, DecoratorSet decorators) { 5521 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5522 } 5523 5524 // Used for storing nulls. 5525 void MacroAssembler::store_heap_oop_null(Address dst) { 5526 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5527 } 5528 5529 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5530 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5531 int index = oop_recorder()->allocate_metadata_index(obj); 5532 RelocationHolder rspec = metadata_Relocation::spec(index); 5533 return Address((address)obj, rspec); 5534 } 5535 5536 // Move an oop into a register. 
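// If the active barrier set supports patching oop constants embedded in
// instructions, the oop is emitted as an immediate move carrying an oop
// relocation; otherwise it is loaded from the constant area (ldr_constant),
// which keeps the oop in data that can be updated without rewriting code.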
5537 void MacroAssembler::movoop(Register dst, jobject obj) { 5538 int oop_index; 5539 if (obj == nullptr) { 5540 oop_index = oop_recorder()->allocate_oop_index(obj); 5541 } else { 5542 #ifdef ASSERT 5543 { 5544 ThreadInVMfromUnknown tiv; 5545 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5546 } 5547 #endif 5548 oop_index = oop_recorder()->find_index(obj); 5549 } 5550 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5551 5552 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5553 mov(dst, Address((address)obj, rspec)); 5554 } else { 5555 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5556 ldr_constant(dst, Address(dummy, rspec)); 5557 } 5558 5559 } 5560 5561 // Move a metadata address into a register. 5562 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5563 int oop_index; 5564 if (obj == nullptr) { 5565 oop_index = oop_recorder()->allocate_metadata_index(obj); 5566 } else { 5567 oop_index = oop_recorder()->find_index(obj); 5568 } 5569 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5570 mov(dst, Address((address)obj, rspec)); 5571 } 5572 5573 Address MacroAssembler::constant_oop_address(jobject obj) { 5574 #ifdef ASSERT 5575 { 5576 ThreadInVMfromUnknown tiv; 5577 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5578 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5579 } 5580 #endif 5581 int oop_index = oop_recorder()->find_index(obj); 5582 return Address((address)obj, oop_Relocation::spec(oop_index)); 5583 } 5584 5585 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 5586 void MacroAssembler::tlab_allocate(Register obj, 5587 Register var_size_in_bytes, 5588 int con_size_in_bytes, 5589 Register t1, 5590 Register t2, 5591 Label& slow_case) { 5592 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5593 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5594 } 5595 5596 void MacroAssembler::inc_held_monitor_count(Register tmp) { 5597 Address dst(rthread, JavaThread::held_monitor_count_offset()); 5598 #ifdef ASSERT 5599 ldr(tmp, dst); 5600 increment(tmp); 5601 str(tmp, dst); 5602 Label ok; 5603 tbz(tmp, 63, ok); 5604 STOP("assert(held monitor count underflow)"); 5605 should_not_reach_here(); 5606 bind(ok); 5607 #else 5608 increment(dst); 5609 #endif 5610 } 5611 5612 void MacroAssembler::dec_held_monitor_count(Register tmp) { 5613 Address dst(rthread, JavaThread::held_monitor_count_offset()); 5614 #ifdef ASSERT 5615 ldr(tmp, dst); 5616 decrement(tmp); 5617 str(tmp, dst); 5618 Label ok; 5619 tbz(tmp, 63, ok); 5620 STOP("assert(held monitor count underflow)"); 5621 should_not_reach_here(); 5622 bind(ok); 5623 #else 5624 decrement(dst); 5625 #endif 5626 } 5627 5628 void MacroAssembler::verify_tlab() { 5629 #ifdef ASSERT 5630 if (UseTLAB && VerifyOops) { 5631 Label next, ok; 5632 5633 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5634 5635 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5636 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5637 cmp(rscratch2, rscratch1); 5638 br(Assembler::HS, next); 5639 STOP("assert(top >= start)"); 5640 should_not_reach_here(); 5641 5642 bind(next); 5643 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5644 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5645 
cmp(rscratch2, rscratch1); 5646 br(Assembler::HS, ok); 5647 STOP("assert(top <= end)"); 5648 should_not_reach_here(); 5649 5650 bind(ok); 5651 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5652 } 5653 #endif 5654 } 5655 5656 // Writes to stack successive pages until offset reached to check for 5657 // stack overflow + shadow pages. This clobbers tmp. 5658 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5659 assert_different_registers(tmp, size, rscratch1); 5660 mov(tmp, sp); 5661 // Bang stack for total size given plus shadow page size. 5662 // Bang one page at a time because large size can bang beyond yellow and 5663 // red zones. 5664 Label loop; 5665 mov(rscratch1, (int)os::vm_page_size()); 5666 bind(loop); 5667 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5668 subsw(size, size, rscratch1); 5669 str(size, Address(tmp)); 5670 br(Assembler::GT, loop); 5671 5672 // Bang down shadow pages too. 5673 // At this point, (tmp-0) is the last address touched, so don't 5674 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5675 // was post-decremented.) Skip this address by starting at i=1, and 5676 // touch a few more pages below. N.B. It is important to touch all 5677 // the way down to and including i=StackShadowPages. 5678 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 5679 // this could be any sized move but this is can be a debugging crumb 5680 // so the bigger the better. 5681 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5682 str(size, Address(tmp)); 5683 } 5684 } 5685 5686 // Move the address of the polling page into dest. 5687 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5688 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5689 } 5690 5691 // Read the polling page. The address of the polling page must 5692 // already be in r. 
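// The relocation recorded for the load below lets the VM recognise a fault
// on the (armed) polling page as a safepoint poll and distinguish return
// polls from loop polls; the address of the poll instruction is returned to
// the caller.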
5693 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5694 address mark; 5695 { 5696 InstructionMark im(this); 5697 code_section()->relocate(inst_mark(), rtype); 5698 ldrw(zr, Address(r, 0)); 5699 mark = inst_mark(); 5700 } 5701 verify_cross_modify_fence_not_required(); 5702 return mark; 5703 } 5704 5705 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5706 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5707 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5708 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5709 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5710 int64_t offset_low = dest_page - low_page; 5711 int64_t offset_high = dest_page - high_page; 5712 5713 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5714 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5715 5716 InstructionMark im(this); 5717 code_section()->relocate(inst_mark(), dest.rspec()); 5718 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5719 // the code cache so that if it is relocated we know it will still reach 5720 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5721 _adrp(reg1, dest.target()); 5722 } else { 5723 uint64_t target = (uint64_t)dest.target(); 5724 uint64_t adrp_target 5725 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5726 5727 _adrp(reg1, (address)adrp_target); 5728 movk(reg1, target >> 32, 32); 5729 } 5730 byte_offset = (uint64_t)dest.target() & 0xfff; 5731 } 5732 5733 void MacroAssembler::load_byte_map_base(Register reg) { 5734 CardTable::CardValue* byte_map_base = 5735 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5736 5737 // Strictly speaking the byte_map_base isn't an address at all, and it might 5738 // even be negative. It is thus materialised as a constant. 
5739 #if INCLUDE_CDS 5740 if (SCCache::is_on_for_write()) { 5741 // SCA needs relocation info for card table base 5742 lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base))); 5743 } else { 5744 #endif 5745 mov(reg, (uint64_t)byte_map_base); 5746 #if INCLUDE_CDS 5747 } 5748 #endif 5749 } 5750 5751 void MacroAssembler::load_aotrc_address(Register reg, address a) { 5752 #if INCLUDE_CDS 5753 assert(AOTRuntimeConstants::contains(a), "address out of range for data area"); 5754 if (SCCache::is_on_for_write()) { 5755 // all aotrc field addresses should be registered in the SCC address table 5756 lea(reg, ExternalAddress(a)); 5757 } else { 5758 mov(reg, (uint64_t)a); 5759 } 5760 #else 5761 ShouldNotReachHere(); 5762 #endif 5763 } 5764 5765 void MacroAssembler::build_frame(int framesize) { 5766 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5767 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5768 protect_return_address(); 5769 if (framesize < ((1 << 9) + 2 * wordSize)) { 5770 sub(sp, sp, framesize); 5771 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5772 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 5773 } else { 5774 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 5775 if (PreserveFramePointer) mov(rfp, sp); 5776 if (framesize < ((1 << 12) + 2 * wordSize)) 5777 sub(sp, sp, framesize - 2 * wordSize); 5778 else { 5779 mov(rscratch1, framesize - 2 * wordSize); 5780 sub(sp, sp, rscratch1); 5781 } 5782 } 5783 verify_cross_modify_fence_not_required(); 5784 } 5785 5786 void MacroAssembler::remove_frame(int framesize) { 5787 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5788 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5789 if (framesize < ((1 << 9) + 2 * wordSize)) { 5790 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5791 add(sp, sp, framesize); 5792 } else { 5793 if (framesize < ((1 << 12) + 2 * wordSize)) 5794 add(sp, sp, framesize - 2 * wordSize); 5795 else { 5796 mov(rscratch1, framesize - 2 * wordSize); 5797 add(sp, sp, rscratch1); 5798 } 5799 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 5800 } 5801 authenticate_return_address(); 5802 } 5803 5804 5805 // This method counts leading positive bytes (highest bit not set) in provided byte array 5806 address MacroAssembler::count_positives(Register ary1, Register len, Register result) { 5807 // Simple and most common case of aligned small array which is not at the 5808 // end of memory page is placed here. All other cases are in stub. 5809 Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE; 5810 const uint64_t UPPER_BIT_MASK=0x8080808080808080; 5811 assert_different_registers(ary1, len, result); 5812 5813 mov(result, len); 5814 cmpw(len, 0); 5815 br(LE, DONE); 5816 cmpw(len, 4 * wordSize); 5817 br(GE, STUB_LONG); // size > 32 then go to stub 5818 5819 int shift = 64 - exact_log2(os::vm_page_size()); 5820 lsl(rscratch1, ary1, shift); 5821 mov(rscratch2, (size_t)(4 * wordSize) << shift); 5822 adds(rscratch2, rscratch1, rscratch2); // At end of page? 
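  // The lsl above moves ary1's offset within its page into the top bits of
  // rscratch1, so the adds sets the carry flag exactly when that offset plus
  // 32 bytes reaches the end of the page. The inline code below may over-read
  // up to a word past 'len', which is only safe if those reads cannot spill
  // onto the next (possibly unmapped) page.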
5823 br(CS, STUB); // at the end of page then go to stub 5824 subs(len, len, wordSize); 5825 br(LT, END); 5826 5827 BIND(LOOP); 5828 ldr(rscratch1, Address(post(ary1, wordSize))); 5829 tst(rscratch1, UPPER_BIT_MASK); 5830 br(NE, SET_RESULT); 5831 subs(len, len, wordSize); 5832 br(GE, LOOP); 5833 cmpw(len, -wordSize); 5834 br(EQ, DONE); 5835 5836 BIND(END); 5837 ldr(rscratch1, Address(ary1)); 5838 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5839 lslv(rscratch1, rscratch1, rscratch2); 5840 tst(rscratch1, UPPER_BIT_MASK); 5841 br(NE, SET_RESULT); 5842 b(DONE); 5843 5844 BIND(STUB); 5845 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5846 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5847 address tpc1 = trampoline_call(count_pos); 5848 if (tpc1 == nullptr) { 5849 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5850 postcond(pc() == badAddress); 5851 return nullptr; 5852 } 5853 b(DONE); 5854 5855 BIND(STUB_LONG); 5856 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5857 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5858 address tpc2 = trampoline_call(count_pos_long); 5859 if (tpc2 == nullptr) { 5860 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5861 postcond(pc() == badAddress); 5862 return nullptr; 5863 } 5864 b(DONE); 5865 5866 BIND(SET_RESULT); 5867 5868 add(len, len, wordSize); 5869 sub(result, result, len); 5870 5871 BIND(DONE); 5872 postcond(pc() != badAddress); 5873 return pc(); 5874 } 5875 5876 // Clobbers: rscratch1, rscratch2, rflags 5877 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5878 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5879 Register tmp4, Register tmp5, Register result, 5880 Register cnt1, int elem_size) { 5881 Label DONE, SAME; 5882 Register tmp1 = rscratch1; 5883 Register tmp2 = rscratch2; 5884 int elem_per_word = wordSize/elem_size; 5885 int log_elem_size = exact_log2(elem_size); 5886 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5887 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5888 int base_offset 5889 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5890 // When the length offset is not aligned to 8 bytes, 5891 // then we align it down. This is valid because the new 5892 // offset will always be the klass which is the same 5893 // for type arrays. 5894 int start_offset = align_down(length_offset, BytesPerWord); 5895 int extra_length = base_offset - start_offset; 5896 assert(start_offset == length_offset || start_offset == klass_offset, 5897 "start offset must be 8-byte-aligned or be the klass offset"); 5898 assert(base_offset != start_offset, "must include the length field"); 5899 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5900 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5901 5902 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5903 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5904 5905 #ifndef PRODUCT 5906 { 5907 const char kind = (elem_size == 2) ? 'U' : 'L'; 5908 char comment[64]; 5909 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5910 BLOCK_COMMENT(comment); 5911 } 5912 #endif 5913 5914 // if (a1 == a2) 5915 // return true; 5916 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5917 br(EQ, SAME); 5918 5919 if (UseSimpleArrayEquals) { 5920 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5921 // if (a1 == nullptr || a2 == nullptr) 5922 // return false; 5923 // a1 & a2 == 0 means (some-pointer is null) or 5924 // (very-rare-or-even-probably-impossible-pointer-values) 5925 // so, we can save one branch in most cases 5926 tst(a1, a2); 5927 mov(result, false); 5928 br(EQ, A_MIGHT_BE_NULL); 5929 // if (a1.length != a2.length) 5930 // return false; 5931 bind(A_IS_NOT_NULL); 5932 ldrw(cnt1, Address(a1, length_offset)); 5933 // Increase loop counter by diff between base- and actual start-offset. 5934 addw(cnt1, cnt1, extra_length); 5935 lea(a1, Address(a1, start_offset)); 5936 lea(a2, Address(a2, start_offset)); 5937 // Check for short strings, i.e. smaller than wordSize. 5938 subs(cnt1, cnt1, elem_per_word); 5939 br(Assembler::LT, SHORT); 5940 // Main 8 byte comparison loop. 5941 bind(NEXT_WORD); { 5942 ldr(tmp1, Address(post(a1, wordSize))); 5943 ldr(tmp2, Address(post(a2, wordSize))); 5944 subs(cnt1, cnt1, elem_per_word); 5945 eor(tmp5, tmp1, tmp2); 5946 cbnz(tmp5, DONE); 5947 } br(GT, NEXT_WORD); 5948 // Last longword. In the case where length == 4 we compare the 5949 // same longword twice, but that's still faster than another 5950 // conditional branch. 5951 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5952 // length == 4. 5953 if (log_elem_size > 0) 5954 lsl(cnt1, cnt1, log_elem_size); 5955 ldr(tmp3, Address(a1, cnt1)); 5956 ldr(tmp4, Address(a2, cnt1)); 5957 eor(tmp5, tmp3, tmp4); 5958 cbnz(tmp5, DONE); 5959 b(SAME); 5960 bind(A_MIGHT_BE_NULL); 5961 // in case both a1 and a2 are not-null, proceed with loads 5962 cbz(a1, DONE); 5963 cbz(a2, DONE); 5964 b(A_IS_NOT_NULL); 5965 bind(SHORT); 5966 5967 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5968 { 5969 ldrw(tmp1, Address(post(a1, 4))); 5970 ldrw(tmp2, Address(post(a2, 4))); 5971 eorw(tmp5, tmp1, tmp2); 5972 cbnzw(tmp5, DONE); 5973 } 5974 bind(TAIL03); 5975 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5976 { 5977 ldrh(tmp3, Address(post(a1, 2))); 5978 ldrh(tmp4, Address(post(a2, 2))); 5979 eorw(tmp5, tmp3, tmp4); 5980 cbnzw(tmp5, DONE); 5981 } 5982 bind(TAIL01); 5983 if (elem_size == 1) { // Only needed when comparing byte arrays. 5984 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5985 { 5986 ldrb(tmp1, a1); 5987 ldrb(tmp2, a2); 5988 eorw(tmp5, tmp1, tmp2); 5989 cbnzw(tmp5, DONE); 5990 } 5991 } 5992 } else { 5993 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5994 CSET_EQ, LAST_CHECK; 5995 mov(result, false); 5996 cbz(a1, DONE); 5997 ldrw(cnt1, Address(a1, length_offset)); 5998 cbz(a2, DONE); 5999 // Increase loop counter by diff between base- and actual start-offset. 
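    // Because the comparison starts at start_offset, the length field (and,
    // where applicable, part of the header) is compared as ordinary data, so
    // arrays of different lengths compare unequal without an explicit length
    // check; extra_length converts those extra header bytes into element
    // units added to the loop count.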
6000 addw(cnt1, cnt1, extra_length); 6001 6002 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's 6003 // faster to perform another branch before comparing a1 and a2 6004 cmp(cnt1, (u1)elem_per_word); 6005 br(LE, SHORT); // short or same 6006 ldr(tmp3, Address(pre(a1, start_offset))); 6007 subs(zr, cnt1, stubBytesThreshold); 6008 br(GE, STUB); 6009 ldr(tmp4, Address(pre(a2, start_offset))); 6010 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 6011 6012 // Main 16 byte comparison loop with 2 exits 6013 bind(NEXT_DWORD); { 6014 ldr(tmp1, Address(pre(a1, wordSize))); 6015 ldr(tmp2, Address(pre(a2, wordSize))); 6016 subs(cnt1, cnt1, 2 * elem_per_word); 6017 br(LE, TAIL); 6018 eor(tmp4, tmp3, tmp4); 6019 cbnz(tmp4, DONE); 6020 ldr(tmp3, Address(pre(a1, wordSize))); 6021 ldr(tmp4, Address(pre(a2, wordSize))); 6022 cmp(cnt1, (u1)elem_per_word); 6023 br(LE, TAIL2); 6024 cmp(tmp1, tmp2); 6025 } br(EQ, NEXT_DWORD); 6026 b(DONE); 6027 6028 bind(TAIL); 6029 eor(tmp4, tmp3, tmp4); 6030 eor(tmp2, tmp1, tmp2); 6031 lslv(tmp2, tmp2, tmp5); 6032 orr(tmp5, tmp4, tmp2); 6033 cmp(tmp5, zr); 6034 b(CSET_EQ); 6035 6036 bind(TAIL2); 6037 eor(tmp2, tmp1, tmp2); 6038 cbnz(tmp2, DONE); 6039 b(LAST_CHECK); 6040 6041 bind(STUB); 6042 ldr(tmp4, Address(pre(a2, start_offset))); 6043 if (elem_size == 2) { // convert to byte counter 6044 lsl(cnt1, cnt1, 1); 6045 } 6046 eor(tmp5, tmp3, tmp4); 6047 cbnz(tmp5, DONE); 6048 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 6049 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 6050 address tpc = trampoline_call(stub); 6051 if (tpc == nullptr) { 6052 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 6053 postcond(pc() == badAddress); 6054 return nullptr; 6055 } 6056 b(DONE); 6057 6058 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 6059 // so, if a2 == null => return false(0), else return true, so we can return a2 6060 mov(result, a2); 6061 b(DONE); 6062 bind(SHORT); 6063 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 6064 ldr(tmp3, Address(a1, start_offset)); 6065 ldr(tmp4, Address(a2, start_offset)); 6066 bind(LAST_CHECK); 6067 eor(tmp4, tmp3, tmp4); 6068 lslv(tmp5, tmp4, tmp5); 6069 cmp(tmp5, zr); 6070 bind(CSET_EQ); 6071 cset(result, EQ); 6072 b(DONE); 6073 } 6074 6075 bind(SAME); 6076 mov(result, true); 6077 // That's it. 6078 bind(DONE); 6079 6080 BLOCK_COMMENT("} array_equals"); 6081 postcond(pc() != badAddress); 6082 return pc(); 6083 } 6084 6085 // Compare Strings 6086 6087 // For Strings we're passed the address of the first characters in a1 6088 // and a2 and the length in cnt1. 6089 // There are two implementations. For arrays >= 8 bytes, all 6090 // comparisons (including the final one, which may overlap) are 6091 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 6092 // halfword, then a short, and then a byte. 6093 6094 void MacroAssembler::string_equals(Register a1, Register a2, 6095 Register result, Register cnt1) 6096 { 6097 Label SAME, DONE, SHORT, NEXT_WORD; 6098 Register tmp1 = rscratch1; 6099 Register tmp2 = rscratch2; 6100 Register cnt2 = tmp2; // cnt2 only used in array length compare 6101 6102 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 6103 6104 #ifndef PRODUCT 6105 { 6106 char comment[64]; 6107 snprintf(comment, sizeof comment, "{string_equalsL"); 6108 BLOCK_COMMENT(comment); 6109 } 6110 #endif 6111 6112 mov(result, false); 6113 6114 // Check for short strings, i.e. smaller than wordSize. 
6115 subs(cnt1, cnt1, wordSize); 6116 br(Assembler::LT, SHORT); 6117 // Main 8 byte comparison loop. 6118 bind(NEXT_WORD); { 6119 ldr(tmp1, Address(post(a1, wordSize))); 6120 ldr(tmp2, Address(post(a2, wordSize))); 6121 subs(cnt1, cnt1, wordSize); 6122 eor(tmp1, tmp1, tmp2); 6123 cbnz(tmp1, DONE); 6124 } br(GT, NEXT_WORD); 6125 // Last longword. In the case where length == 4 we compare the 6126 // same longword twice, but that's still faster than another 6127 // conditional branch. 6128 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 6129 // length == 4. 6130 ldr(tmp1, Address(a1, cnt1)); 6131 ldr(tmp2, Address(a2, cnt1)); 6132 eor(tmp2, tmp1, tmp2); 6133 cbnz(tmp2, DONE); 6134 b(SAME); 6135 6136 bind(SHORT); 6137 Label TAIL03, TAIL01; 6138 6139 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 6140 { 6141 ldrw(tmp1, Address(post(a1, 4))); 6142 ldrw(tmp2, Address(post(a2, 4))); 6143 eorw(tmp1, tmp1, tmp2); 6144 cbnzw(tmp1, DONE); 6145 } 6146 bind(TAIL03); 6147 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 6148 { 6149 ldrh(tmp1, Address(post(a1, 2))); 6150 ldrh(tmp2, Address(post(a2, 2))); 6151 eorw(tmp1, tmp1, tmp2); 6152 cbnzw(tmp1, DONE); 6153 } 6154 bind(TAIL01); 6155 tbz(cnt1, 0, SAME); // 0-1 bytes left. 6156 { 6157 ldrb(tmp1, a1); 6158 ldrb(tmp2, a2); 6159 eorw(tmp1, tmp1, tmp2); 6160 cbnzw(tmp1, DONE); 6161 } 6162 // Arrays are equal. 6163 bind(SAME); 6164 mov(result, true); 6165 6166 // That's it. 6167 bind(DONE); 6168 BLOCK_COMMENT("} string_equals"); 6169 } 6170 6171 6172 // The size of the blocks erased by the zero_blocks stub. We must 6173 // handle anything smaller than this ourselves in zero_words(). 6174 const int MacroAssembler::zero_words_block_size = 8; 6175 6176 // zero_words() is used by C2 ClearArray patterns and by 6177 // C1_MacroAssembler. It is as small as possible, handling small word 6178 // counts locally and delegating anything larger to the zero_blocks 6179 // stub. It is expanded many times in compiled code, so it is 6180 // important to keep it short. 6181 6182 // ptr: Address of a buffer to be zeroed. 6183 // cnt: Count in HeapWords. 6184 // 6185 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 6186 address MacroAssembler::zero_words(Register ptr, Register cnt) 6187 { 6188 assert(is_power_of_2(zero_words_block_size), "adjust this"); 6189 6190 BLOCK_COMMENT("zero_words {"); 6191 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 6192 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 6193 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 6194 6195 subs(rscratch1, cnt, zero_words_block_size); 6196 Label around; 6197 br(LO, around); 6198 { 6199 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 6200 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 6201 // Make sure this is a C2 compilation. C1 allocates space only for 6202 // trampoline stubs generated by Call LIR ops, and in any case it 6203 // makes sense for a C1 compilation task to proceed as quickly as 6204 // possible. 
6205 CompileTask* task; 6206 if (StubRoutines::aarch64::complete() 6207 && Thread::current()->is_Compiler_thread() 6208 && (task = ciEnv::current()->task()) 6209 && is_c2_compile(task->comp_level())) { 6210 address tpc = trampoline_call(zero_blocks); 6211 if (tpc == nullptr) { 6212 DEBUG_ONLY(reset_labels(around)); 6213 return nullptr; 6214 } 6215 } else { 6216 far_call(zero_blocks); 6217 } 6218 } 6219 bind(around); 6220 6221 // We have a few words left to do. zero_blocks has adjusted r10 and r11 6222 // for us. 6223 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 6224 Label l; 6225 tbz(cnt, exact_log2(i), l); 6226 for (int j = 0; j < i; j += 2) { 6227 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 6228 } 6229 bind(l); 6230 } 6231 { 6232 Label l; 6233 tbz(cnt, 0, l); 6234 str(zr, Address(ptr)); 6235 bind(l); 6236 } 6237 6238 BLOCK_COMMENT("} zero_words"); 6239 return pc(); 6240 } 6241 6242 // base: Address of a buffer to be zeroed, 8 bytes aligned. 6243 // cnt: Immediate count in HeapWords. 6244 // 6245 // r10, r11, rscratch1, and rscratch2 are clobbered. 6246 address MacroAssembler::zero_words(Register base, uint64_t cnt) 6247 { 6248 assert(wordSize <= BlockZeroingLowLimit, 6249 "increase BlockZeroingLowLimit"); 6250 address result = nullptr; 6251 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 6252 #ifndef PRODUCT 6253 { 6254 char buf[64]; 6255 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 6256 BLOCK_COMMENT(buf); 6257 } 6258 #endif 6259 if (cnt >= 16) { 6260 uint64_t loops = cnt/16; 6261 if (loops > 1) { 6262 mov(rscratch2, loops - 1); 6263 } 6264 { 6265 Label loop; 6266 bind(loop); 6267 for (int i = 0; i < 16; i += 2) { 6268 stp(zr, zr, Address(base, i * BytesPerWord)); 6269 } 6270 add(base, base, 16 * BytesPerWord); 6271 if (loops > 1) { 6272 subs(rscratch2, rscratch2, 1); 6273 br(GE, loop); 6274 } 6275 } 6276 } 6277 cnt %= 16; 6278 int i = cnt & 1; // store any odd word to start 6279 if (i) str(zr, Address(base)); 6280 for (; i < (int)cnt; i += 2) { 6281 stp(zr, zr, Address(base, i * wordSize)); 6282 } 6283 BLOCK_COMMENT("} zero_words"); 6284 result = pc(); 6285 } else { 6286 mov(r10, base); mov(r11, cnt); 6287 result = zero_words(r10, r11); 6288 } 6289 return result; 6290 } 6291 6292 // Zero blocks of memory by using DC ZVA. 6293 // 6294 // Aligns the base address first sufficiently for DC ZVA, then uses 6295 // DC ZVA repeatedly for every full block. cnt is the size to be 6296 // zeroed in HeapWords. Returns the count of words left to be zeroed 6297 // in cnt. 6298 // 6299 // NOTE: This is intended to be used in the zero_blocks() stub. If 6300 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 6301 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 6302 Register tmp = rscratch1; 6303 Register tmp2 = rscratch2; 6304 int zva_length = VM_Version::zva_length(); 6305 Label initial_table_end, loop_zva; 6306 Label fini; 6307 6308 // Base must be 16 byte aligned. If not just return and let caller handle it 6309 tst(base, 0x0f); 6310 br(Assembler::NE, fini); 6311 // Align base with ZVA length. 6312 neg(tmp, base); 6313 andr(tmp, tmp, zva_length - 1); 6314 6315 // tmp: the number of bytes to be filled to align the base with ZVA length. 
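  // base is known to be 16-byte aligned here, so tmp is a multiple of 16.
  // The computed branch below jumps into the table of stp instructions that
  // follows: each 16 bytes still needed corresponds to one 4-byte stp, so
  // subtracting tmp / 4 from the address of initial_table_end executes
  // exactly enough stores to bring base up to ZVA alignment.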
6316 add(base, base, tmp); 6317 sub(cnt, cnt, tmp, Assembler::ASR, 3); 6318 adr(tmp2, initial_table_end); 6319 sub(tmp2, tmp2, tmp, Assembler::LSR, 2); 6320 br(tmp2); 6321 6322 for (int i = -zva_length + 16; i < 0; i += 16) 6323 stp(zr, zr, Address(base, i)); 6324 bind(initial_table_end); 6325 6326 sub(cnt, cnt, zva_length >> 3); 6327 bind(loop_zva); 6328 dc(Assembler::ZVA, base); 6329 subs(cnt, cnt, zva_length >> 3); 6330 add(base, base, zva_length); 6331 br(Assembler::GE, loop_zva); 6332 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA 6333 bind(fini); 6334 } 6335 6336 // base: Address of a buffer to be filled, 8 bytes aligned. 6337 // cnt: Count in 8-byte unit. 6338 // value: Value to be filled with. 6339 // base will point to the end of the buffer after filling. 6340 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 6341 { 6342 // Algorithm: 6343 // 6344 // if (cnt == 0) { 6345 // return; 6346 // } 6347 // if ((p & 8) != 0) { 6348 // *p++ = v; 6349 // } 6350 // 6351 // scratch1 = cnt & 14; 6352 // cnt -= scratch1; 6353 // p += scratch1; 6354 // switch (scratch1 / 2) { 6355 // do { 6356 // cnt -= 16; 6357 // p[-16] = v; 6358 // p[-15] = v; 6359 // case 7: 6360 // p[-14] = v; 6361 // p[-13] = v; 6362 // case 6: 6363 // p[-12] = v; 6364 // p[-11] = v; 6365 // // ... 6366 // case 1: 6367 // p[-2] = v; 6368 // p[-1] = v; 6369 // case 0: 6370 // p += 16; 6371 // } while (cnt); 6372 // } 6373 // if ((cnt & 1) == 1) { 6374 // *p++ = v; 6375 // } 6376 6377 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 6378 6379 Label fini, skip, entry, loop; 6380 const int unroll = 8; // Number of stp instructions we'll unroll 6381 6382 cbz(cnt, fini); 6383 tbz(base, 3, skip); 6384 str(value, Address(post(base, 8))); 6385 sub(cnt, cnt, 1); 6386 bind(skip); 6387 6388 andr(rscratch1, cnt, (unroll-1) * 2); 6389 sub(cnt, cnt, rscratch1); 6390 add(base, base, rscratch1, Assembler::LSL, 3); 6391 adr(rscratch2, entry); 6392 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 6393 br(rscratch2); 6394 6395 bind(loop); 6396 add(base, base, unroll * 16); 6397 for (int i = -unroll; i < 0; i++) 6398 stp(value, value, Address(base, i * 16)); 6399 bind(entry); 6400 subs(cnt, cnt, unroll * 2); 6401 br(Assembler::GE, loop); 6402 6403 tbz(cnt, 0, fini); 6404 str(value, Address(post(base, 8))); 6405 bind(fini); 6406 } 6407 6408 // Intrinsic for 6409 // 6410 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray 6411 // return the number of characters copied. 6412 // - java/lang/StringUTF16.compress 6413 // return index of non-latin1 character if copy fails, otherwise 'len'. 6414 // 6415 // This version always returns the number of characters copied, and does not 6416 // clobber the 'len' register. A successful copy will complete with the post- 6417 // condition: 'res' == 'len', while an unsuccessful copy will exit with the 6418 // post-condition: 0 <= 'res' < 'len'. 6419 // 6420 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) has proven to 6421 // degrade performance (on Ampere Altra - Neoverse N1), to an extent 6422 // beyond the acceptable, even though the footprint would be smaller. 6423 // Using 'umaxv' in the ASCII-case comes with a small penalty but does 6424 // avoid additional bloat. 
6425 // 6426 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6427 void MacroAssembler::encode_iso_array(Register src, Register dst, 6428 Register len, Register res, bool ascii, 6429 FloatRegister vtmp0, FloatRegister vtmp1, 6430 FloatRegister vtmp2, FloatRegister vtmp3, 6431 FloatRegister vtmp4, FloatRegister vtmp5) 6432 { 6433 Register cnt = res; 6434 Register max = rscratch1; 6435 Register chk = rscratch2; 6436 6437 prfm(Address(src), PLDL1STRM); 6438 movw(cnt, len); 6439 6440 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6441 6442 Label LOOP_32, DONE_32, FAIL_32; 6443 6444 BIND(LOOP_32); 6445 { 6446 cmpw(cnt, 32); 6447 br(LT, DONE_32); 6448 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6449 // Extract lower bytes. 6450 FloatRegister vlo0 = vtmp4; 6451 FloatRegister vlo1 = vtmp5; 6452 uzp1(vlo0, T16B, vtmp0, vtmp1); 6453 uzp1(vlo1, T16B, vtmp2, vtmp3); 6454 // Merge bits... 6455 orr(vtmp0, T16B, vtmp0, vtmp1); 6456 orr(vtmp2, T16B, vtmp2, vtmp3); 6457 // Extract merged upper bytes. 6458 FloatRegister vhix = vtmp0; 6459 uzp2(vhix, T16B, vtmp0, vtmp2); 6460 // ISO-check on hi-parts (all zero). 6461 // ASCII-check on lo-parts (no sign). 6462 FloatRegister vlox = vtmp1; // Merge lower bytes. 6463 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6464 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6465 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6466 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6467 ASCII(orr(chk, chk, max)); 6468 cbnz(chk, FAIL_32); 6469 subw(cnt, cnt, 32); 6470 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6471 b(LOOP_32); 6472 } 6473 BIND(FAIL_32); 6474 sub(src, src, 64); 6475 BIND(DONE_32); 6476 6477 Label LOOP_8, SKIP_8; 6478 6479 BIND(LOOP_8); 6480 { 6481 cmpw(cnt, 8); 6482 br(LT, SKIP_8); 6483 FloatRegister vhi = vtmp0; 6484 FloatRegister vlo = vtmp1; 6485 ld1(vtmp3, T8H, src); 6486 uzp1(vlo, T16B, vtmp3, vtmp3); 6487 uzp2(vhi, T16B, vtmp3, vtmp3); 6488 // ISO-check on hi-parts (all zero). 6489 // ASCII-check on lo-parts (no sign). 6490 ASCII(cm(LT, vtmp2, T16B, vlo)); 6491 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6492 ASCII(umov(max, vtmp2, B, 0)); 6493 ASCII(orr(chk, chk, max)); 6494 cbnz(chk, SKIP_8); 6495 6496 strd(vlo, Address(post(dst, 8))); 6497 subw(cnt, cnt, 8); 6498 add(src, src, 16); 6499 b(LOOP_8); 6500 } 6501 BIND(SKIP_8); 6502 6503 #undef ASCII 6504 6505 Label LOOP, DONE; 6506 6507 cbz(cnt, DONE); 6508 BIND(LOOP); 6509 { 6510 Register chr = rscratch1; 6511 ldrh(chr, Address(post(src, 2))); 6512 tst(chr, ascii ? 0xff80 : 0xff00); 6513 br(NE, DONE); 6514 strb(chr, Address(post(dst, 1))); 6515 subs(cnt, cnt, 1); 6516 br(GT, LOOP); 6517 } 6518 BIND(DONE); 6519 // Return index where we stopped. 6520 subw(res, len, cnt); 6521 } 6522 6523 // Inflate byte[] array to char[]. 6524 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6525 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6526 FloatRegister vtmp1, FloatRegister vtmp2, 6527 FloatRegister vtmp3, Register tmp4) { 6528 Label big, done, after_init, to_stub; 6529 6530 assert_different_registers(src, dst, len, tmp4, rscratch1); 6531 6532 fmovd(vtmp1, 0.0); 6533 lsrw(tmp4, len, 3); 6534 bind(after_init); 6535 cbnzw(tmp4, big); 6536 // Short string: less than 8 bytes. 6537 { 6538 Label loop, tiny; 6539 6540 cmpw(len, 4); 6541 br(LT, tiny); 6542 // Use SIMD to do 4 bytes. 
6543 ldrs(vtmp2, post(src, 4)); 6544 zip1(vtmp3, T8B, vtmp2, vtmp1); 6545 subw(len, len, 4); 6546 strd(vtmp3, post(dst, 8)); 6547 6548 cbzw(len, done); 6549 6550 // Do the remaining bytes by steam. 6551 bind(loop); 6552 ldrb(tmp4, post(src, 1)); 6553 strh(tmp4, post(dst, 2)); 6554 subw(len, len, 1); 6555 6556 bind(tiny); 6557 cbnz(len, loop); 6558 6559 b(done); 6560 } 6561 6562 if (SoftwarePrefetchHintDistance >= 0) { 6563 bind(to_stub); 6564 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6565 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6566 address tpc = trampoline_call(stub); 6567 if (tpc == nullptr) { 6568 DEBUG_ONLY(reset_labels(big, done)); 6569 postcond(pc() == badAddress); 6570 return nullptr; 6571 } 6572 b(after_init); 6573 } 6574 6575 // Unpack the bytes 8 at a time. 6576 bind(big); 6577 { 6578 Label loop, around, loop_last, loop_start; 6579 6580 if (SoftwarePrefetchHintDistance >= 0) { 6581 const int large_loop_threshold = (64 + 16)/8; 6582 ldrd(vtmp2, post(src, 8)); 6583 andw(len, len, 7); 6584 cmp(tmp4, (u1)large_loop_threshold); 6585 br(GE, to_stub); 6586 b(loop_start); 6587 6588 bind(loop); 6589 ldrd(vtmp2, post(src, 8)); 6590 bind(loop_start); 6591 subs(tmp4, tmp4, 1); 6592 br(EQ, loop_last); 6593 zip1(vtmp2, T16B, vtmp2, vtmp1); 6594 ldrd(vtmp3, post(src, 8)); 6595 st1(vtmp2, T8H, post(dst, 16)); 6596 subs(tmp4, tmp4, 1); 6597 zip1(vtmp3, T16B, vtmp3, vtmp1); 6598 st1(vtmp3, T8H, post(dst, 16)); 6599 br(NE, loop); 6600 b(around); 6601 bind(loop_last); 6602 zip1(vtmp2, T16B, vtmp2, vtmp1); 6603 st1(vtmp2, T8H, post(dst, 16)); 6604 bind(around); 6605 cbz(len, done); 6606 } else { 6607 andw(len, len, 7); 6608 bind(loop); 6609 ldrd(vtmp2, post(src, 8)); 6610 sub(tmp4, tmp4, 1); 6611 zip1(vtmp3, T16B, vtmp2, vtmp1); 6612 st1(vtmp3, T8H, post(dst, 16)); 6613 cbnz(tmp4, loop); 6614 } 6615 } 6616 6617 // Do the tail of up to 8 bytes. 6618 add(src, src, len); 6619 ldrd(vtmp3, Address(src, -8)); 6620 add(dst, dst, len, ext::uxtw, 1); 6621 zip1(vtmp3, T16B, vtmp3, vtmp1); 6622 strq(vtmp3, Address(dst, -16)); 6623 6624 bind(done); 6625 postcond(pc() != badAddress); 6626 return pc(); 6627 } 6628 6629 // Compress char[] array to byte[]. 6630 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6631 // Return the array length if every element in array can be encoded, 6632 // otherwise, the index of first non-latin1 (> 0xff) character. 6633 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6634 Register res, 6635 FloatRegister tmp0, FloatRegister tmp1, 6636 FloatRegister tmp2, FloatRegister tmp3, 6637 FloatRegister tmp4, FloatRegister tmp5) { 6638 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6639 } 6640 6641 // java.math.round(double a) 6642 // Returns the closest long to the argument, with ties rounding to 6643 // positive infinity. This requires some fiddling for corner 6644 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6645 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6646 FloatRegister ftmp) { 6647 Label DONE; 6648 BLOCK_COMMENT("java_round_double: { "); 6649 fmovd(rscratch1, src); 6650 // Use RoundToNearestTiesAway unless src small and -ve. 
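  // For src >= 0, ties away from zero and ties towards positive infinity
  // agree, and for |src| >= 2^52 the value is already integral, so the
  // single fcvtas below gives the right answer. Only small negative values
  // with a fractional part take the fix-up path: add 0.5 (exact for
  // |src| < 2^52, so no double rounding) and round towards negative
  // infinity.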
6651 fcvtasd(dst, src); 6652 // Test if src >= 0 || abs(src) >= 0x1.0p52 6653 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6654 mov(rscratch2, julong_cast(0x1.0p52)); 6655 cmp(rscratch1, rscratch2); 6656 br(HS, DONE); { 6657 // src < 0 && abs(src) < 0x1.0p52 6658 // src may have a fractional part, so add 0.5 6659 fmovd(ftmp, 0.5); 6660 faddd(ftmp, src, ftmp); 6661 // Convert double to jlong, use RoundTowardsNegative 6662 fcvtmsd(dst, ftmp); 6663 } 6664 bind(DONE); 6665 BLOCK_COMMENT("} java_round_double"); 6666 } 6667 6668 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6669 FloatRegister ftmp) { 6670 Label DONE; 6671 BLOCK_COMMENT("java_round_float: { "); 6672 fmovs(rscratch1, src); 6673 // Use RoundToNearestTiesAway unless src small and -ve. 6674 fcvtassw(dst, src); 6675 // Test if src >= 0 || abs(src) >= 0x1.0p23 6676 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6677 mov(rscratch2, jint_cast(0x1.0p23f)); 6678 cmp(rscratch1, rscratch2); 6679 br(HS, DONE); { 6680 // src < 0 && |src| < 0x1.0p23 6681 // src may have a fractional part, so add 0.5 6682 fmovs(ftmp, 0.5f); 6683 fadds(ftmp, src, ftmp); 6684 // Convert float to jint, use RoundTowardsNegative 6685 fcvtmssw(dst, ftmp); 6686 } 6687 bind(DONE); 6688 BLOCK_COMMENT("} java_round_float"); 6689 } 6690 6691 // get_thread() can be called anywhere inside generated code so we 6692 // need to save whatever non-callee save context might get clobbered 6693 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6694 // the call setup code. 6695 // 6696 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6697 // On other systems, the helper is a usual C function. 6698 // 6699 void MacroAssembler::get_thread(Register dst) { 6700 RegSet saved_regs = 6701 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6702 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6703 6704 protect_return_address(); 6705 push(saved_regs, sp); 6706 6707 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6708 blr(lr); 6709 if (dst != c_rarg0) { 6710 mov(dst, c_rarg0); 6711 } 6712 6713 pop(saved_regs, sp); 6714 authenticate_return_address(); 6715 } 6716 6717 void MacroAssembler::cache_wb(Address line) { 6718 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6719 assert(line.index() == noreg, "index should be noreg"); 6720 assert(line.offset() == 0, "offset should be 0"); 6721 // would like to assert this 6722 // assert(line._ext.shift == 0, "shift should be zero"); 6723 if (VM_Version::supports_dcpop()) { 6724 // writeback using clear virtual address to point of persistence 6725 dc(Assembler::CVAP, line.base()); 6726 } else { 6727 // no need to generate anything as Unsafe.writebackMemory should 6728 // never invoke this stub 6729 } 6730 } 6731 6732 void MacroAssembler::cache_wbsync(bool is_pre) { 6733 // we only need a barrier post sync 6734 if (!is_pre) { 6735 membar(Assembler::AnyAny); 6736 } 6737 } 6738 6739 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6740 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) { 6741 return; 6742 } 6743 // Make sure that native code does not change SVE vector length. 
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}

void MacroAssembler::safepoint_isb() {
  isb();
#ifndef PRODUCT
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
#endif
}

#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
#endif

void MacroAssembler::spin_wait() {
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
      case SpinWait::NOP:
        nop();
        break;
      case SpinWait::ISB:
        isb();
        break;
      case SpinWait::YIELD:
        yield();
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

// Stack frame creation/removal

void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return value in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    paciza(return_reg);
  }
}

// Authenticate the LR.
// Use before function return, after restoring FP and loading LR from memory.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address() {
  if (VM_Version::use_rop_protection()) {
    autiaz();
    check_return_address();
  }
}

// Authenticate the return value in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autiza(return_reg);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
//
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

#ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used, i.e. when the program returns to the invalid LR. At this point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
#endif

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// On 64 bit we will store integer-like items to the stack as
// 64-bit items (AArch64 ABI), even though Java would only store
// 32 bits for a parameter. On 32 bit it would simply be 32 bits,
// so this routine does 32->32 on 32 bit and 32->64 on 64 bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg.
// Must pass a handle, not the oop itself.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
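  // (When dst is a register, rHandle was chosen above to be that register, so
  // the handle already sits where the callee expects it.)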
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to do a float reg to int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// Implements lightweight-locking.
//
//  - obj: the object to be locked
//  - t1, t2, t3: temporary registers, will be destroyed
//  - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds.
    str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
  }

  // Check if the lock-stack is full.
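  // The lock-stack top is kept as a byte offset from rthread; once it reaches
  // LockStack::end_offset() there is no room for another entry and we must
  // take the slow path.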
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
//
//  - obj: the object to be unlocked
//  - t1, t2, t3: temporary registers
//  - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
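  // The entry was popped above, so push obj back (the slot contents are
  // re-stored only in debug builds, matching the debug-only clear earlier)
  // and bump the top offset again so the runtime slow path sees a consistent
  // lock-stack.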
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}