/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif

// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//        other  Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//        strictly should be 64 bit non-FP/SIMD i.e.
//        0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//        strictly should be 64 bit movz #imm16<<0
//        110___10100 (i.e. requires insn[31:21] == 11010010100)
//
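// Worked example (illustrative, not exhaustive): for a 64-bit
// "cbz x3, <target>" the encoding is sf|011010|0|imm19|Rt, so
//   Instruction_aarch64::extract(insn, 30, 25) == 0b011010
// dispatches (together with b.cond's 0b101010) to conditionalBranch(),
// which reads the signed imm19 at insn[23:5] and scales it by 4:
//   target = insn_addr + (sextract(insn, 23, 5) << 2);
//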
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};
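// Worked example (illustrative): patching the 48-bit target
// 0x00007fff12345678 into the movz/movk/movk sequence produced by
// movptr() yields
//   movz Rx, #0x5678            // bits 15:0
//   movk Rx, #0x1234, lsl #16   // bits 31:16
//   movk Rx, #0x7fff, lsl #32   // bits 47:32
// Patcher::immediate() above rewrites exactly these three imm16 fields.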
// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}
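// Worked example (illustrative): decoding "adrp x8, <page>" at
// insn_addr == 0x0000ffff80001234 with a combined immediate
// (immhi:immlo) of 0x2:
//   offset      = 0x2 << 12 = 0x2000
//   target_page = (0x0000ffff80001234 + 0x2000) & ~0xfffULL
//               = 0x0000ffff80003000
// The paired ldr/str, add, or movk (handled by the adrp*_impl inner
// relocs above) then contributes the low 12 or high 16 bits.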
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}
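// Worked example (illustrative): patch_oop() with a narrow oop value
// n == 0x12345678 rewrites the two-instruction form as
//   movz Rx, #0x1234, lsl #16   // upper half, set by the movz
//   movk Rx, #0x5678            // lower half, merged by the movk
// which is why the narrow encoding is recognized by its
// movz-with-shift-16 opcode bits (0b11010010101) in the first
// instruction.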
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
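// Typical call protocol (illustrative; see call_VM_base() below):
//   set_last_Java_frame(last_sp, rfp, return_pc, rscratch1); // publish anchor
//   ... call into the VM ...
//   reset_last_Java_frame(true);                             // tear it down
// While the thread is in VM/native code, the published anchor is what
// lets the stack walker find the last Java frame.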
// Calls to C land
//
// When entering C land, the rfp & esp of the last Java frame have to
// be recorded in the (thread-local) JavaThread object. When leaving C
// land, the last Java fp has to be reset to 0. This is required to
// allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
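// Decision summary (illustrative) for target_needs_far_branch():
//   ReservedCodeCacheSize <= 128M : never far (a b/bl always reaches)
//   ReservedCodeCacheSize >  240M : always far
//   128M..240M                    : far unless the target lies in the
//                                   non-nmethod code heap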
void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

static bool is_preemptable(address entry_point) {
  return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
}
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  if (is_preemptable(entry_point)) {
    // skip setting last_pc since we already set it to desired value.
    set_last_Java_frame(last_java_sp, rfp, noreg, rscratch1);
  } else {
    set_last_Java_frame(last_java_sp, rfp, l, rscratch1);
  }

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}
// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}
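// Resulting stub layout (illustrative), matching static_call_stub_size():
//   isb                         // 1 insn
//   movz/movk/movk  rmethod     // 3 insns from mov_metadata()
//   movz/movk/movk  rscratch1   // 3 insns from movptr()
//   br              rscratch1   // 1 insn  => 8 * instruction_size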
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * (7 + extra_instructions);
  } else {
    return NativeInstruction::instruction_size * (5 + extra_instructions);
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, receiver);
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
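// Alignment example (illustrative): with end_alignment ==
// CodeEntryAlignment, the align() call at the top of ic_check() pads
// *before* the UEP so that
//   uep_offset + ic_check_size()   // i.e. the VEP that follows the check
// is a multiple of end_alignment; the assert above checks exactly this.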
// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result_oop(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_oop_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_oop_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_metadata(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_metadata_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_metadata_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}
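// Itable layout sketch (illustrative) for the scan above:
//   recv_klass + vtable_start_offset():
//     vtableEntry[0 .. vtable_length)            // skipped via vtable_length
//     itableOffsetEntry { interface; offset }*   // scanned until match/null
//     itableMethodEntry { method }*              // indexed by itable_index
// The matching entry's 'offset' is added to recv_klass to reach the
// itableMethodEntry holding the target method.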
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);
  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
    - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}
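// Typical use (illustrative; register choices are hypothetical):
//   Label L_ok;
//   check_klass_subtype(r1 /* sub */, r2 /* super */, r10 /* temp */, L_ok);
//   // falling through here means "not a subtype": L_failure is bound
//   // at the end of check_klass_subtype()
//   ... failure path ...
//   bind(L_ok);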
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   Register super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
  bool must_load_sco = ! super_check_offset->is_valid();
  if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = temp_reg;
  }

  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype
  br(Assembler::EQ, *L_success);

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
  if (L_failure == &L_fallthrough) {
    cbz(rscratch1, *L_slow_path);
  } else {
    cbnz(rscratch1, *L_failure);
    final_jmp(*L_slow_path);
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
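// Note (illustrative): on exit the flags hold the last cmp result, so
// EQ means 'value' was found and NE means the scan ran off the end.
// If count may be zero, the caller must pre-set NE itself, e.g.
//   cmp(sp, zr); // sp is never zero, so this clears Z
// as check_klass_subtype_slow_path_linear() does below.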
1509
1510 assert_different_registers(sub_klass, super_klass, temp_reg);
1511 if (temp2_reg != noreg)
1512 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1513 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1514
1515 Label L_fallthrough;
1516 int label_nulls = 0;
1517 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1518 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1519 assert(label_nulls <= 1, "at most one null in the batch");
1520
1521 // a couple of useful fields in sub_klass:
1522 int ss_offset = in_bytes(Klass::secondary_supers_offset());
1523 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1524 Address secondary_supers_addr(sub_klass, ss_offset);
1525 Address super_cache_addr( sub_klass, sc_offset);
1526
1527 BLOCK_COMMENT("check_klass_subtype_slow_path");
1528
1529 // Do a linear scan of the secondary super-klass chain.
1530 // This code is rarely used, so simplicity is a virtue here.
1531 // The repne_scan routine is called with fixed registers here, which we must spill.
1532 // Don't worry too much about pre-existing connections with the input regs.
1533
1534 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1535 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1536
1537 RegSet pushed_registers;
1538 if (!IS_A_TEMP(r2)) pushed_registers += r2;
1539 if (!IS_A_TEMP(r5)) pushed_registers += r5;
1540
1541 if (super_klass != r0) {
1542 if (!IS_A_TEMP(r0)) pushed_registers += r0;
1543 }
1544
1545 push(pushed_registers, sp);
1546
1547 // Get super_klass value into r0 (even if it was in r5 or r2).
1548 if (super_klass != r0) {
1549 mov(r0, super_klass);
1550 }
1551
1552 #ifndef PRODUCT
1553 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1554 #endif //PRODUCT
1555
1556 // We will consult the secondary-super array.
1557 ldr(r5, secondary_supers_addr);
1558 // Load the array length.
1559 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1560 // Skip to start of data.
1561 add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1562
1563 cmp(sp, zr); // Clear Z flag; SP is never zero
1564 // Scan R2 words at [R5] for an occurrence of R0.
1565 // Set NZ/Z based on last compare.
1566 repne_scan(r5, r0, r2, rscratch1);
1567
1568 // Unspill the temp. registers:
1569 pop(pushed_registers, sp);
1570
1571 br(Assembler::NE, *L_failure);
1572
1573 // Success. Cache the super we found and proceed in triumph.
1574
1575 if (UseSecondarySupersCache) {
1576 str(super_klass, super_cache_addr);
1577 }
1578
1579 if (L_success != &L_fallthrough) {
1580 b(*L_success);
1581 }
1582
1583 #undef IS_A_TEMP
1584
1585 bind(L_fallthrough);
1586 }
1587
1588 // If Register r is invalid, take a new register from
1589 // available_regs and add it to regs_to_push.
1590 Register MacroAssembler::allocate_if_noreg(Register r,
1591 RegSetIterator<Register> &available_regs,
1592 RegSet &regs_to_push) {
1593 if (!r->is_valid()) {
1594 r = *available_regs++;
1595 regs_to_push += r;
1596 }
1597 return r;
1598 }
1599
1600 // check_klass_subtype_slow_path_table() looks for super_klass in the
1601 // hash table belonging to sub_klass, branching to L_success or
1602 // L_failure as appropriate. This is essentially a shim which
1603 // allocates registers as necessary then calls
1604 // lookup_secondary_supers_table() to do the work.
Any of the temp
1605 // regs may be noreg, in which case this logic will choose some
1606 // registers and push and pop them around the call.
1607 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1608 Register super_klass,
1609 Register temp_reg,
1610 Register temp2_reg,
1611 Register temp3_reg,
1612 Register result_reg,
1613 FloatRegister vtemp,
1614 Label* L_success,
1615 Label* L_failure,
1616 bool set_cond_codes) {
1617 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1618
1619 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1620
1621 Label L_fallthrough;
1622 int label_nulls = 0;
1623 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1624 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1625 assert(label_nulls <= 1, "at most one null in the batch");
1626
1627 BLOCK_COMMENT("check_klass_subtype_slow_path");
1628
1629 RegSetIterator<Register> available_regs
1630 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1631
1632 RegSet pushed_regs;
1633
1634 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1635 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1636 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1637 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1638
1639 push(pushed_regs, sp);
1640
1641 lookup_secondary_supers_table_var(sub_klass,
1642 super_klass,
1643 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1644 nullptr);
1645 cmp(result_reg, zr);
1646
1647 // Unspill the temp. registers:
1648 pop(pushed_regs, sp);
1649
1650 // NB! Callers may assume that, when set_cond_codes is true, this
1651 // code sets temp2_reg to a nonzero value.
1652 if (set_cond_codes) {
1653 mov(temp2_reg, 1);
1654 }
1655
1656 br(Assembler::NE, *L_failure);
1657
1658 if (L_success != &L_fallthrough) {
1659 b(*L_success);
1660 }
1661
1662 bind(L_fallthrough);
1663 }
1664
1665 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1666 Register super_klass,
1667 Register temp_reg,
1668 Register temp2_reg,
1669 Label* L_success,
1670 Label* L_failure,
1671 bool set_cond_codes) {
1672 if (UseSecondarySupersTable) {
1673 check_klass_subtype_slow_path_table
1674 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1675 /*vtemp*/fnoreg,
1676 L_success, L_failure, set_cond_codes);
1677 } else {
1678 check_klass_subtype_slow_path_linear
1679 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1680 }
1681 }
1682
1683
1684 // Ensure that the inline code and the stub are using the same registers.
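// (These register assignments are a shared contract between this file,
// the inline expansion in aarch64.ad, and the slow-path stub generator;
// a change in any one place must be mirrored in the others, which is
// what the assert in the macro below enforces.)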
1685 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1686 do { \ 1687 assert(r_super_klass == r0 && \ 1688 r_array_base == r1 && \ 1689 r_array_length == r2 && \ 1690 (r_array_index == r3 || r_array_index == noreg) && \ 1691 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1692 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1693 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1694 } while(0) 1695 1696 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass, 1697 Register r_super_klass, 1698 Register temp1, 1699 Register temp2, 1700 Register temp3, 1701 FloatRegister vtemp, 1702 Register result, 1703 u1 super_klass_slot, 1704 bool stub_is_near) { 1705 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1706 1707 Label L_fallthrough; 1708 1709 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1710 1711 const Register 1712 r_array_base = temp1, // r1 1713 r_array_length = temp2, // r2 1714 r_array_index = temp3, // r3 1715 r_bitmap = rscratch2; 1716 1717 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1718 1719 u1 bit = super_klass_slot; 1720 1721 // Make sure that result is nonzero if the TBZ below misses. 1722 mov(result, 1); 1723 1724 // We're going to need the bitmap in a vector reg and in a core reg, 1725 // so load both now. 1726 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1727 if (bit != 0) { 1728 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1729 } 1730 // First check the bitmap to see if super_klass might be present. If 1731 // the bit is zero, we are certain that super_klass is not one of 1732 // the secondary supers. 1733 tbz(r_bitmap, bit, L_fallthrough); 1734 1735 // Get the first array index that can contain super_klass into r_array_index. 1736 if (bit != 0) { 1737 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1738 cnt(vtemp, T8B, vtemp); 1739 addv(vtemp, T8B, vtemp); 1740 fmovd(r_array_index, vtemp); 1741 } else { 1742 mov(r_array_index, (u1)1); 1743 } 1744 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1745 1746 // We will consult the secondary-super array. 1747 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1748 1749 // The value i in r_array_index is >= 1, so even though r_array_base 1750 // points to the length, we don't need to adjust it to point to the 1751 // data. 1752 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1753 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1754 1755 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1756 eor(result, result, r_super_klass); 1757 cbz(result, L_fallthrough); // Found a match 1758 1759 // Is there another entry to check? Consult the bitmap. 1760 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1761 1762 // Linear probe. 1763 if (bit != 0) { 1764 ror(r_bitmap, r_bitmap, bit); 1765 } 1766 1767 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1768 // The next slot to be inspected, by the stub we're about to call, 1769 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1770 // have been checked. 
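// Hand the partially-completed probe off to the shared slow-path stub,
// which continues the linear probe from secondary_supers[r_array_index]
// using the fixed register assignments checked above. On return,
// result (r5) is zero iff r_super_klass was found.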
1771 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()); 1772 if (stub_is_near) { 1773 bl(stub); 1774 } else { 1775 address call = trampoline_call(stub); 1776 if (call == nullptr) { 1777 return false; // trampoline allocation failed 1778 } 1779 } 1780 1781 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1782 1783 bind(L_fallthrough); 1784 1785 if (VerifySecondarySupers) { 1786 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1787 temp1, temp2, result); // r1, r2, r5 1788 } 1789 return true; 1790 } 1791 1792 // At runtime, return 0 in result if r_super_klass is a superclass of 1793 // r_sub_klass, otherwise return nonzero. Use this version of 1794 // lookup_secondary_supers_table() if you don't know ahead of time 1795 // which superclass will be searched for. Used by interpreter and 1796 // runtime stubs. It is larger and has somewhat greater latency than 1797 // the version above, which takes a constant super_klass_slot. 1798 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 1799 Register r_super_klass, 1800 Register temp1, 1801 Register temp2, 1802 Register temp3, 1803 FloatRegister vtemp, 1804 Register result, 1805 Label *L_success) { 1806 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1807 1808 Label L_fallthrough; 1809 1810 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1811 1812 const Register 1813 r_array_index = temp3, 1814 slot = rscratch1, 1815 r_bitmap = rscratch2; 1816 1817 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 1818 1819 // Make sure that result is nonzero if the test below misses. 1820 mov(result, 1); 1821 1822 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1823 1824 // First check the bitmap to see if super_klass might be present. If 1825 // the bit is zero, we are certain that super_klass is not one of 1826 // the secondary supers. 1827 1828 // This next instruction is equivalent to: 1829 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); 1830 // sub(temp2, tmp_reg, slot); 1831 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); 1832 lslv(temp2, r_bitmap, temp2); 1833 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough); 1834 1835 bool must_save_v0 = (vtemp == fnoreg); 1836 if (must_save_v0) { 1837 // temp1 and result are free, so use them to preserve vtemp 1838 vtemp = v0; 1839 mov(temp1, vtemp, D, 0); 1840 mov(result, vtemp, D, 1); 1841 } 1842 1843 // Get the first array index that can contain super_klass into r_array_index. 1844 mov(vtemp, D, 0, temp2); 1845 cnt(vtemp, T8B, vtemp); 1846 addv(vtemp, T8B, vtemp); 1847 mov(r_array_index, vtemp, D, 0); 1848 1849 if (must_save_v0) { 1850 mov(vtemp, D, 0, temp1 ); 1851 mov(vtemp, D, 1, result); 1852 } 1853 1854 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1855 1856 const Register 1857 r_array_base = temp1, 1858 r_array_length = temp2; 1859 1860 // The value i in r_array_index is >= 1, so even though r_array_base 1861 // points to the length, we don't need to adjust it to point to the 1862 // data. 1863 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1864 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1865 1866 // We will consult the secondary-super array. 
1867 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1868 1869 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1870 eor(result, result, r_super_klass); 1871 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match 1872 1873 // Is there another entry to check? Consult the bitmap. 1874 rorv(r_bitmap, r_bitmap, slot); 1875 // rol(r_bitmap, r_bitmap, 1); 1876 tbz(r_bitmap, 1, L_fallthrough); 1877 1878 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1879 // The next slot to be inspected, by the logic we're about to call, 1880 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1881 // have been checked. 1882 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index, 1883 r_bitmap, r_array_length, result, /*is_stub*/false); 1884 1885 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1886 1887 bind(L_fallthrough); 1888 1889 if (VerifySecondarySupers) { 1890 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1891 temp1, temp2, result); // r1, r2, r5 1892 } 1893 1894 if (L_success) { 1895 cbz(result, *L_success); 1896 } 1897 } 1898 1899 // Called by code generated by check_klass_subtype_slow_path 1900 // above. This is called when there is a collision in the hashed 1901 // lookup in the secondary supers array. 1902 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 1903 Register r_array_base, 1904 Register r_array_index, 1905 Register r_bitmap, 1906 Register temp1, 1907 Register result, 1908 bool is_stub) { 1909 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1); 1910 1911 const Register 1912 r_array_length = temp1, 1913 r_sub_klass = noreg; // unused 1914 1915 if (is_stub) { 1916 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1917 } 1918 1919 Label L_fallthrough, L_huge; 1920 1921 // Load the array length. 1922 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1923 // And adjust the array base to point to the data. 1924 // NB! Effectively increments current slot index by 1. 1925 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 1926 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1927 1928 // The bitmap is full to bursting. 1929 // Implicit invariant: BITMAP_FULL implies (length > 0) 1930 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); 1931 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2)); 1932 br(GT, L_huge); 1933 1934 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 1935 // current slot (at secondary_supers[r_array_index]) has not yet 1936 // been inspected, and r_array_index may be out of bounds if we 1937 // wrapped around the end of the array. 1938 1939 { // This is conventional linear probing, but instead of terminating 1940 // when a null entry is found in the table, we maintain a bitmap 1941 // in which a 0 indicates missing entries. 1942 // As long as the bitmap is not completely full, 1943 // array_length == popcount(bitmap). The array_length check above 1944 // guarantees there are 0s in the bitmap, so the loop eventually 1945 // terminates. 1946 Label L_loop; 1947 bind(L_loop); 1948 1949 // Check for wraparound. 
1950 cmp(r_array_index, r_array_length); 1951 csel(r_array_index, zr, r_array_index, GE); 1952 1953 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1954 eor(result, rscratch1, r_super_klass); 1955 cbz(result, L_fallthrough); 1956 1957 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero 1958 1959 ror(r_bitmap, r_bitmap, 1); 1960 add(r_array_index, r_array_index, 1); 1961 b(L_loop); 1962 } 1963 1964 { // Degenerate case: more than 64 secondary supers. 1965 // FIXME: We could do something smarter here, maybe a vectorized 1966 // comparison or a binary search, but is that worth any added 1967 // complexity? 1968 bind(L_huge); 1969 cmp(sp, zr); // Clear Z flag; SP is never zero 1970 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1); 1971 cset(result, NE); // result == 0 iff we got a match. 1972 } 1973 1974 bind(L_fallthrough); 1975 } 1976 1977 // Make sure that the hashed lookup and a linear scan agree. 1978 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 1979 Register r_super_klass, 1980 Register temp1, 1981 Register temp2, 1982 Register result) { 1983 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 1984 1985 const Register 1986 r_array_base = temp1, 1987 r_array_length = temp2, 1988 r_array_index = noreg, // unused 1989 r_bitmap = noreg; // unused 1990 1991 BLOCK_COMMENT("verify_secondary_supers_table {"); 1992 1993 // We will consult the secondary-super array. 1994 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1995 1996 // Load the array length. 1997 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1998 // And adjust the array base to point to the data. 1999 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 2000 2001 cmp(sp, zr); // Clear Z flag; SP is never zero 2002 // Scan R2 words at [R5] for an occurrence of R0. 2003 // Set NZ/Z based on last compare. 2004 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 2005 // rscratch1 == 0 iff we got a match. 
2006 cset(rscratch1, NE); 2007 2008 Label passed; 2009 cmp(result, zr); 2010 cset(result, NE); // normalize result to 0/1 for comparison 2011 2012 cmp(rscratch1, result); 2013 br(EQ, passed); 2014 { 2015 mov(r0, r_super_klass); // r0 <- r0 2016 mov(r1, r_sub_klass); // r1 <- r4 2017 mov(r2, /*expected*/rscratch1); // r2 <- r8 2018 mov(r3, result); // r3 <- r5 2019 mov(r4, (address)("mismatch")); // r4 <- const 2020 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 2021 should_not_reach_here(); 2022 } 2023 bind(passed); 2024 2025 BLOCK_COMMENT("} verify_secondary_supers_table"); 2026 } 2027 2028 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 2029 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 2030 assert_different_registers(klass, rthread, scratch); 2031 2032 Label L_fallthrough, L_tmp; 2033 if (L_fast_path == nullptr) { 2034 L_fast_path = &L_fallthrough; 2035 } else if (L_slow_path == nullptr) { 2036 L_slow_path = &L_fallthrough; 2037 } 2038 // Fast path check: class is fully initialized 2039 lea(scratch, Address(klass, InstanceKlass::init_state_offset())); 2040 ldarb(scratch, scratch); 2041 cmp(scratch, InstanceKlass::fully_initialized); 2042 br(Assembler::EQ, *L_fast_path); 2043 2044 // Fast path check: current thread is initializer thread 2045 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 2046 cmp(rthread, scratch); 2047 2048 if (L_slow_path == &L_fallthrough) { 2049 br(Assembler::EQ, *L_fast_path); 2050 bind(*L_slow_path); 2051 } else if (L_fast_path == &L_fallthrough) { 2052 br(Assembler::NE, *L_slow_path); 2053 bind(*L_fast_path); 2054 } else { 2055 Unimplemented(); 2056 } 2057 } 2058 2059 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 2060 if (!VerifyOops) return; 2061 2062 // Pass register number to verify_oop_subroutine 2063 const char* b = nullptr; 2064 { 2065 ResourceMark rm; 2066 stringStream ss; 2067 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 2068 b = code_string(ss.as_string()); 2069 } 2070 BLOCK_COMMENT("verify_oop {"); 2071 2072 strip_return_address(); // This might happen within a stack frame. 2073 protect_return_address(); 2074 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 2075 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 2076 2077 mov(r0, reg); 2078 movptr(rscratch1, (uintptr_t)(address)b); 2079 2080 // call indirectly to solve generation ordering problem 2081 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 2082 ldr(rscratch2, Address(rscratch2)); 2083 blr(rscratch2); 2084 2085 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 2086 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 2087 authenticate_return_address(); 2088 2089 BLOCK_COMMENT("} verify_oop"); 2090 } 2091 2092 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 2093 if (!VerifyOops) return; 2094 2095 const char* b = nullptr; 2096 { 2097 ResourceMark rm; 2098 stringStream ss; 2099 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 2100 b = code_string(ss.as_string()); 2101 } 2102 BLOCK_COMMENT("verify_oop_addr {"); 2103 2104 strip_return_address(); // This might happen within a stack frame. 
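// With return-address signing (ROP protection) enabled, lr carries a
// pointer authentication code in its upper bits: strip any existing
// signature above, then re-sign lr before it is spilled below; the
// matching authenticate_return_address() follows the reload. Both
// operations are no-ops when ROP protection is not in use.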
2105 protect_return_address(); 2106 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 2107 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 2108 2109 // addr may contain sp so we will have to adjust it based on the 2110 // pushes that we just did. 2111 if (addr.uses(sp)) { 2112 lea(r0, addr); 2113 ldr(r0, Address(r0, 4 * wordSize)); 2114 } else { 2115 ldr(r0, addr); 2116 } 2117 movptr(rscratch1, (uintptr_t)(address)b); 2118 2119 // call indirectly to solve generation ordering problem 2120 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 2121 ldr(rscratch2, Address(rscratch2)); 2122 blr(rscratch2); 2123 2124 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 2125 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 2126 authenticate_return_address(); 2127 2128 BLOCK_COMMENT("} verify_oop_addr"); 2129 } 2130 2131 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2132 int extra_slot_offset) { 2133 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2134 int stackElementSize = Interpreter::stackElementSize; 2135 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 2136 #ifdef ASSERT 2137 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 2138 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 2139 #endif 2140 if (arg_slot.is_constant()) { 2141 return Address(esp, arg_slot.as_constant() * stackElementSize 2142 + offset); 2143 } else { 2144 add(rscratch1, esp, arg_slot.as_register(), 2145 ext::uxtx, exact_log2(stackElementSize)); 2146 return Address(rscratch1, offset); 2147 } 2148 } 2149 2150 void MacroAssembler::call_VM_leaf_base(address entry_point, 2151 int number_of_arguments, 2152 Label *retaddr) { 2153 Label E, L; 2154 2155 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 2156 2157 mov(rscratch1, entry_point); 2158 blr(rscratch1); 2159 if (retaddr) 2160 bind(*retaddr); 2161 2162 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 2163 } 2164 2165 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 2166 call_VM_leaf_base(entry_point, number_of_arguments); 2167 } 2168 2169 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2170 pass_arg0(this, arg_0); 2171 call_VM_leaf_base(entry_point, 1); 2172 } 2173 2174 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2175 assert_different_registers(arg_1, c_rarg0); 2176 pass_arg0(this, arg_0); 2177 pass_arg1(this, arg_1); 2178 call_VM_leaf_base(entry_point, 2); 2179 } 2180 2181 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2182 Register arg_1, Register arg_2) { 2183 assert_different_registers(arg_1, c_rarg0); 2184 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2185 pass_arg0(this, arg_0); 2186 pass_arg1(this, arg_1); 2187 pass_arg2(this, arg_2); 2188 call_VM_leaf_base(entry_point, 3); 2189 } 2190 2191 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2192 pass_arg0(this, arg_0); 2193 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2194 } 2195 2196 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2197 2198 assert_different_registers(arg_0, c_rarg1); 2199 pass_arg1(this, arg_1); 2200 pass_arg0(this, arg_0); 2201 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2202 } 2203 2204 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2205 
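// The asserts below ensure that moving the arguments into the C
// calling-convention registers in reverse order (arg_2 into c_rarg2
// first, then arg_1, then arg_0) never clobbers a source register
// before it has been read.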
assert_different_registers(arg_0, c_rarg1, c_rarg2); 2206 assert_different_registers(arg_1, c_rarg2); 2207 pass_arg2(this, arg_2); 2208 pass_arg1(this, arg_1); 2209 pass_arg0(this, arg_0); 2210 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2211 } 2212 2213 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2214 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2215 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2216 assert_different_registers(arg_2, c_rarg3); 2217 pass_arg3(this, arg_3); 2218 pass_arg2(this, arg_2); 2219 pass_arg1(this, arg_1); 2220 pass_arg0(this, arg_0); 2221 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2222 } 2223 2224 void MacroAssembler::null_check(Register reg, int offset) { 2225 if (needs_explicit_null_check(offset)) { 2226 // provoke OS null exception if reg is null by 2227 // accessing M[reg] w/o changing any registers 2228 // NOTE: this is plenty to provoke a segv 2229 ldr(zr, Address(reg)); 2230 } else { 2231 // nothing to do, (later) access of M[reg + offset] 2232 // will provoke OS null exception if reg is null 2233 } 2234 } 2235 2236 // MacroAssembler protected routines needed to implement 2237 // public methods 2238 2239 void MacroAssembler::mov(Register r, Address dest) { 2240 code_section()->relocate(pc(), dest.rspec()); 2241 uint64_t imm64 = (uint64_t)dest.target(); 2242 movptr(r, imm64); 2243 } 2244 2245 // Move a constant pointer into r. In AArch64 mode the virtual 2246 // address space is 48 bits in size, so we only need three 2247 // instructions to create a patchable instruction sequence that can 2248 // reach anywhere. 2249 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2250 #ifndef PRODUCT 2251 { 2252 char buffer[64]; 2253 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2254 block_comment(buffer); 2255 } 2256 #endif 2257 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2258 movz(r, imm64 & 0xffff); 2259 imm64 >>= 16; 2260 movk(r, imm64 & 0xffff, 16); 2261 imm64 >>= 16; 2262 movk(r, imm64 & 0xffff, 32); 2263 } 2264 2265 // Macro to mov replicated immediate to vector register. 2266 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2267 // the upper 56/48/32 bits must be zeros for B/H/S type. 
2268 // Vd will get the following values for different arrangements in T 2269 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2270 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2271 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2272 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2273 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2274 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2275 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2276 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2277 // Clobbers rscratch1 2278 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2279 assert(T != T1Q, "unsupported"); 2280 if (T == T1D || T == T2D) { 2281 int imm = operand_valid_for_movi_immediate(imm64, T); 2282 if (-1 != imm) { 2283 movi(Vd, T, imm); 2284 } else { 2285 mov(rscratch1, imm64); 2286 dup(Vd, T, rscratch1); 2287 } 2288 return; 2289 } 2290 2291 #ifdef ASSERT 2292 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2293 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2294 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2295 #endif 2296 int shift = operand_valid_for_movi_immediate(imm64, T); 2297 uint32_t imm32 = imm64 & 0xffffffffULL; 2298 if (shift >= 0) { 2299 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2300 } else { 2301 movw(rscratch1, imm32); 2302 dup(Vd, T, rscratch1); 2303 } 2304 } 2305 2306 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2307 { 2308 #ifndef PRODUCT 2309 { 2310 char buffer[64]; 2311 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2312 block_comment(buffer); 2313 } 2314 #endif 2315 if (operand_valid_for_logical_immediate(false, imm64)) { 2316 orr(dst, zr, imm64); 2317 } else { 2318 // we can use a combination of MOVZ or MOVN with 2319 // MOVK to build up the constant 2320 uint64_t imm_h[4]; 2321 int zero_count = 0; 2322 int neg_count = 0; 2323 int i; 2324 for (i = 0; i < 4; i++) { 2325 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2326 if (imm_h[i] == 0) { 2327 zero_count++; 2328 } else if (imm_h[i] == 0xffffL) { 2329 neg_count++; 2330 } 2331 } 2332 if (zero_count == 4) { 2333 // one MOVZ will do 2334 movz(dst, 0); 2335 } else if (neg_count == 4) { 2336 // one MOVN will do 2337 movn(dst, 0); 2338 } else if (zero_count == 3) { 2339 for (i = 0; i < 4; i++) { 2340 if (imm_h[i] != 0L) { 2341 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2342 break; 2343 } 2344 } 2345 } else if (neg_count == 3) { 2346 // one MOVN will do 2347 for (int i = 0; i < 4; i++) { 2348 if (imm_h[i] != 0xffffL) { 2349 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2350 break; 2351 } 2352 } 2353 } else if (zero_count == 2) { 2354 // one MOVZ and one MOVK will do 2355 for (i = 0; i < 3; i++) { 2356 if (imm_h[i] != 0L) { 2357 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2358 i++; 2359 break; 2360 } 2361 } 2362 for (;i < 4; i++) { 2363 if (imm_h[i] != 0L) { 2364 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2365 } 2366 } 2367 } else if (neg_count == 2) { 2368 // one MOVN and one MOVK will do 2369 for (i = 0; i < 4; i++) { 2370 if (imm_h[i] != 0xffffL) { 2371 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2372 i++; 2373 break; 2374 } 2375 } 2376 for (;i < 4; i++) { 2377 if (imm_h[i] != 0xffffL) { 2378 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2379 } 2380 } 2381 } else if (zero_count == 1) { 2382 // one MOVZ and two MOVKs will do 2383 for (i = 
0; i < 4; i++) { 2384 if (imm_h[i] != 0L) { 2385 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2386 i++; 2387 break; 2388 } 2389 } 2390 for (;i < 4; i++) { 2391 if (imm_h[i] != 0x0L) { 2392 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2393 } 2394 } 2395 } else if (neg_count == 1) { 2396 // one MOVN and two MOVKs will do 2397 for (i = 0; i < 4; i++) { 2398 if (imm_h[i] != 0xffffL) { 2399 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2400 i++; 2401 break; 2402 } 2403 } 2404 for (;i < 4; i++) { 2405 if (imm_h[i] != 0xffffL) { 2406 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2407 } 2408 } 2409 } else { 2410 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2411 movz(dst, (uint32_t)imm_h[0], 0); 2412 for (i = 1; i < 4; i++) { 2413 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2414 } 2415 } 2416 } 2417 } 2418 2419 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2420 { 2421 #ifndef PRODUCT 2422 { 2423 char buffer[64]; 2424 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2425 block_comment(buffer); 2426 } 2427 #endif 2428 if (operand_valid_for_logical_immediate(true, imm32)) { 2429 orrw(dst, zr, imm32); 2430 } else { 2431 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2432 // constant 2433 uint32_t imm_h[2]; 2434 imm_h[0] = imm32 & 0xffff; 2435 imm_h[1] = ((imm32 >> 16) & 0xffff); 2436 if (imm_h[0] == 0) { 2437 movzw(dst, imm_h[1], 16); 2438 } else if (imm_h[0] == 0xffff) { 2439 movnw(dst, imm_h[1] ^ 0xffff, 16); 2440 } else if (imm_h[1] == 0) { 2441 movzw(dst, imm_h[0], 0); 2442 } else if (imm_h[1] == 0xffff) { 2443 movnw(dst, imm_h[0] ^ 0xffff, 0); 2444 } else { 2445 // use a MOVZ and MOVK (makes it easier to debug) 2446 movzw(dst, imm_h[0], 0); 2447 movkw(dst, imm_h[1], 16); 2448 } 2449 } 2450 } 2451 2452 // Form an address from base + offset in Rd. Rd may or may 2453 // not actually be used: you must use the Address that is returned. 2454 // It is up to you to ensure that the shift provided matches the size 2455 // of your data. 2456 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2457 if (Address::offset_ok_for_immed(byte_offset, shift)) 2458 // It fits; no need for any heroics 2459 return Address(base, byte_offset); 2460 2461 // Don't do anything clever with negative or misaligned offsets 2462 unsigned mask = (1 << shift) - 1; 2463 if (byte_offset < 0 || byte_offset & mask) { 2464 mov(Rd, byte_offset); 2465 add(Rd, base, Rd); 2466 return Address(Rd); 2467 } 2468 2469 // See if we can do this with two 12-bit offsets 2470 { 2471 uint64_t word_offset = byte_offset >> shift; 2472 uint64_t masked_offset = word_offset & 0xfff000; 2473 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2474 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2475 add(Rd, base, masked_offset << shift); 2476 word_offset -= masked_offset; 2477 return Address(Rd, word_offset << shift); 2478 } 2479 } 2480 2481 // Do it the hard way 2482 mov(Rd, byte_offset); 2483 add(Rd, base, Rd); 2484 return Address(Rd); 2485 } 2486 2487 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2488 bool want_remainder, Register scratch) 2489 { 2490 // Full implementation of Java idiv and irem. The function 2491 // returns the (pc) offset of the div instruction - may be needed 2492 // for implicit exceptions. 
2493 //
2494 // constraint : ra/rb =/= scratch
2495 // normal case
2496 //
2497 // input : ra: dividend
2498 // rb: divisor
2499 //
2500 // result: either
2501 // quotient (= ra idiv rb)
2502 // remainder (= ra irem rb)
2503
2504 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2505
2506 int idivl_offset = offset();
2507 if (! want_remainder) {
2508 sdivw(result, ra, rb);
2509 } else {
2510 sdivw(scratch, ra, rb);
2511 Assembler::msubw(result, scratch, rb, ra);
2512 }
2513
2514 return idivl_offset;
2515 }
2516
2517 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2518 bool want_remainder, Register scratch)
2519 {
2520 // Full implementation of Java ldiv and lrem. The function
2521 // returns the (pc) offset of the div instruction - may be needed
2522 // for implicit exceptions.
2523 //
2524 // constraint : ra/rb =/= scratch
2525 // normal case
2526 //
2527 // input : ra: dividend
2528 // rb: divisor
2529 //
2530 // result: either
2531 // quotient (= ra idiv rb)
2532 // remainder (= ra irem rb)
2533
2534 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2535
2536 int idivq_offset = offset();
2537 if (! want_remainder) {
2538 sdiv(result, ra, rb);
2539 } else {
2540 sdiv(scratch, ra, rb);
2541 Assembler::msub(result, scratch, rb, ra);
2542 }
2543
2544 return idivq_offset;
2545 }
2546
2547 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2548 address prev = pc() - NativeMembar::instruction_size;
2549 address last = code()->last_insn();
2550 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2551 NativeMembar *bar = NativeMembar_at(prev);
2552 if (AlwaysMergeDMB) {
2553 bar->set_kind(bar->get_kind() | order_constraint);
2554 BLOCK_COMMENT("merged membar(always)");
2555 return;
2556 }
2557 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2558 // doing so would introduce a StoreLoad which the caller did not
2559 // intend
2560 if (bar->get_kind() == order_constraint
2561 || bar->get_kind() == AnyAny
2562 || order_constraint == AnyAny) {
2563 // We are merging two memory barrier instructions. On AArch64 we
2564 // can do this simply by ORing them together.
2565 bar->set_kind(bar->get_kind() | order_constraint);
2566 BLOCK_COMMENT("merged membar");
2567 return;
2568 } else {
2569 // A special case like "DMB ST; DMB LD; DMB ST": the last DMB can be skipped.
2570 // We need to check the last two instructions.
2571 address prev2 = prev - NativeMembar::instruction_size;
2572 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2573 NativeMembar *bar2 = NativeMembar_at(prev2);
2574 assert(bar2->get_kind() == order_constraint, "it should be merged before");
2575 BLOCK_COMMENT("merged membar(elided)");
2576 return;
2577 }
2578 }
2579 }
2580 code()->set_last_insn(pc());
2581 dmb(Assembler::barrier(order_constraint));
2582 }
2583
2584 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2585 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2586 merge_ldst(rt, adr, size_in_bytes, is_store);
2587 code()->clear_last_insn();
2588 return true;
2589 } else {
2590 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte loads/stores are supported.");
2591 const uint64_t mask = size_in_bytes - 1;
2592 if (adr.getMode() == Address::base_plus_offset &&
2593 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2594 code()->set_last_insn(pc()); 2595 } 2596 return false; 2597 } 2598 } 2599 2600 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2601 // We always try to merge two adjacent loads into one ldp. 2602 if (!try_merge_ldst(Rx, adr, 8, false)) { 2603 Assembler::ldr(Rx, adr); 2604 } 2605 } 2606 2607 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2608 // We always try to merge two adjacent loads into one ldp. 2609 if (!try_merge_ldst(Rw, adr, 4, false)) { 2610 Assembler::ldrw(Rw, adr); 2611 } 2612 } 2613 2614 void MacroAssembler::str(Register Rx, const Address &adr) { 2615 // We always try to merge two adjacent stores into one stp. 2616 if (!try_merge_ldst(Rx, adr, 8, true)) { 2617 Assembler::str(Rx, adr); 2618 } 2619 } 2620 2621 void MacroAssembler::strw(Register Rw, const Address &adr) { 2622 // We always try to merge two adjacent stores into one stp. 2623 if (!try_merge_ldst(Rw, adr, 4, true)) { 2624 Assembler::strw(Rw, adr); 2625 } 2626 } 2627 2628 // MacroAssembler routines found actually to be needed 2629 2630 void MacroAssembler::push(Register src) 2631 { 2632 str(src, Address(pre(esp, -1 * wordSize))); 2633 } 2634 2635 void MacroAssembler::pop(Register dst) 2636 { 2637 ldr(dst, Address(post(esp, 1 * wordSize))); 2638 } 2639 2640 // Note: load_unsigned_short used to be called load_unsigned_word. 2641 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2642 int off = offset(); 2643 ldrh(dst, src); 2644 return off; 2645 } 2646 2647 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2648 int off = offset(); 2649 ldrb(dst, src); 2650 return off; 2651 } 2652 2653 int MacroAssembler::load_signed_short(Register dst, Address src) { 2654 int off = offset(); 2655 ldrsh(dst, src); 2656 return off; 2657 } 2658 2659 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2660 int off = offset(); 2661 ldrsb(dst, src); 2662 return off; 2663 } 2664 2665 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2666 int off = offset(); 2667 ldrshw(dst, src); 2668 return off; 2669 } 2670 2671 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2672 int off = offset(); 2673 ldrsbw(dst, src); 2674 return off; 2675 } 2676 2677 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2678 switch (size_in_bytes) { 2679 case 8: ldr(dst, src); break; 2680 case 4: ldrw(dst, src); break; 2681 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2682 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2683 default: ShouldNotReachHere(); 2684 } 2685 } 2686 2687 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2688 switch (size_in_bytes) { 2689 case 8: str(src, dst); break; 2690 case 4: strw(src, dst); break; 2691 case 2: strh(src, dst); break; 2692 case 1: strb(src, dst); break; 2693 default: ShouldNotReachHere(); 2694 } 2695 } 2696 2697 void MacroAssembler::decrementw(Register reg, int value) 2698 { 2699 if (value < 0) { incrementw(reg, -value); return; } 2700 if (value == 0) { return; } 2701 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2702 /* else */ { 2703 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2704 movw(rscratch2, (unsigned)value); 2705 subw(reg, reg, rscratch2); 2706 } 2707 } 2708 2709 void MacroAssembler::decrement(Register reg, int value) 2710 { 2711 if (value < 0) { increment(reg, -value); return; } 2712 if (value == 0) { return; } 2713 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2714 /* else */ { 2715 assert(reg != rscratch2, "invalid dst for register decrement"); 2716 mov(rscratch2, (uint64_t)value); 2717 sub(reg, reg, rscratch2); 2718 } 2719 } 2720 2721 void MacroAssembler::decrementw(Address dst, int value) 2722 { 2723 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2724 if (dst.getMode() == Address::literal) { 2725 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2726 lea(rscratch2, dst); 2727 dst = Address(rscratch2); 2728 } 2729 ldrw(rscratch1, dst); 2730 decrementw(rscratch1, value); 2731 strw(rscratch1, dst); 2732 } 2733 2734 void MacroAssembler::decrement(Address dst, int value) 2735 { 2736 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2737 if (dst.getMode() == Address::literal) { 2738 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2739 lea(rscratch2, dst); 2740 dst = Address(rscratch2); 2741 } 2742 ldr(rscratch1, dst); 2743 decrement(rscratch1, value); 2744 str(rscratch1, dst); 2745 } 2746 2747 void MacroAssembler::incrementw(Register reg, int value) 2748 { 2749 if (value < 0) { decrementw(reg, -value); return; } 2750 if (value == 0) { return; } 2751 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2752 /* else */ { 2753 assert(reg != rscratch2, "invalid dst for register increment"); 2754 movw(rscratch2, (unsigned)value); 2755 addw(reg, reg, rscratch2); 2756 } 2757 } 2758 2759 void MacroAssembler::increment(Register reg, int value) 2760 { 2761 if (value < 0) { decrement(reg, -value); return; } 2762 if (value == 0) { return; } 2763 if (value < (1 << 12)) { add(reg, reg, value); return; } 2764 /* else */ { 2765 assert(reg != rscratch2, "invalid dst for register increment"); 2766 movw(rscratch2, (unsigned)value); 2767 add(reg, reg, rscratch2); 2768 } 2769 } 2770 2771 void MacroAssembler::incrementw(Address dst, int value) 2772 { 2773 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2774 if (dst.getMode() == Address::literal) { 2775 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2776 lea(rscratch2, dst); 2777 dst = Address(rscratch2); 2778 } 2779 ldrw(rscratch1, dst); 2780 incrementw(rscratch1, value); 2781 strw(rscratch1, dst); 2782 } 2783 2784 void MacroAssembler::increment(Address dst, int value) 2785 { 2786 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2787 if (dst.getMode() == Address::literal) { 2788 assert(abs(value) < (1 << 12), 
"invalid value and address mode combination"); 2789 lea(rscratch2, dst); 2790 dst = Address(rscratch2); 2791 } 2792 ldr(rscratch1, dst); 2793 increment(rscratch1, value); 2794 str(rscratch1, dst); 2795 } 2796 2797 // Push lots of registers in the bit set supplied. Don't push sp. 2798 // Return the number of words pushed 2799 int MacroAssembler::push(unsigned int bitset, Register stack) { 2800 int words_pushed = 0; 2801 2802 // Scan bitset to accumulate register pairs 2803 unsigned char regs[32]; 2804 int count = 0; 2805 for (int reg = 0; reg <= 30; reg++) { 2806 if (1 & bitset) 2807 regs[count++] = reg; 2808 bitset >>= 1; 2809 } 2810 regs[count++] = zr->raw_encoding(); 2811 count &= ~1; // Only push an even number of regs 2812 2813 if (count) { 2814 stp(as_Register(regs[0]), as_Register(regs[1]), 2815 Address(pre(stack, -count * wordSize))); 2816 words_pushed += 2; 2817 } 2818 for (int i = 2; i < count; i += 2) { 2819 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2820 Address(stack, i * wordSize)); 2821 words_pushed += 2; 2822 } 2823 2824 assert(words_pushed == count, "oops, pushed != count"); 2825 2826 return count; 2827 } 2828 2829 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2830 int words_pushed = 0; 2831 2832 // Scan bitset to accumulate register pairs 2833 unsigned char regs[32]; 2834 int count = 0; 2835 for (int reg = 0; reg <= 30; reg++) { 2836 if (1 & bitset) 2837 regs[count++] = reg; 2838 bitset >>= 1; 2839 } 2840 regs[count++] = zr->raw_encoding(); 2841 count &= ~1; 2842 2843 for (int i = 2; i < count; i += 2) { 2844 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2845 Address(stack, i * wordSize)); 2846 words_pushed += 2; 2847 } 2848 if (count) { 2849 ldp(as_Register(regs[0]), as_Register(regs[1]), 2850 Address(post(stack, count * wordSize))); 2851 words_pushed += 2; 2852 } 2853 2854 assert(words_pushed == count, "oops, pushed != count"); 2855 2856 return count; 2857 } 2858 2859 // Push lots of registers in the bit set supplied. Don't push sp. 
2860 // Return the number of dwords pushed 2861 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2862 int words_pushed = 0; 2863 bool use_sve = false; 2864 int sve_vector_size_in_bytes = 0; 2865 2866 #ifdef COMPILER2 2867 use_sve = Matcher::supports_scalable_vector(); 2868 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2869 #endif 2870 2871 // Scan bitset to accumulate register pairs 2872 unsigned char regs[32]; 2873 int count = 0; 2874 for (int reg = 0; reg <= 31; reg++) { 2875 if (1 & bitset) 2876 regs[count++] = reg; 2877 bitset >>= 1; 2878 } 2879 2880 if (count == 0) { 2881 return 0; 2882 } 2883 2884 if (mode == PushPopFull) { 2885 if (use_sve && sve_vector_size_in_bytes > 16) { 2886 mode = PushPopSVE; 2887 } else { 2888 mode = PushPopNeon; 2889 } 2890 } 2891 2892 #ifndef PRODUCT 2893 { 2894 char buffer[48]; 2895 if (mode == PushPopSVE) { 2896 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2897 } else if (mode == PushPopNeon) { 2898 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2899 } else { 2900 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2901 } 2902 block_comment(buffer); 2903 } 2904 #endif 2905 2906 if (mode == PushPopSVE) { 2907 sub(stack, stack, sve_vector_size_in_bytes * count); 2908 for (int i = 0; i < count; i++) { 2909 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2910 } 2911 return count * sve_vector_size_in_bytes / 8; 2912 } 2913 2914 if (mode == PushPopNeon) { 2915 if (count == 1) { 2916 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2917 return 2; 2918 } 2919 2920 bool odd = (count & 1) == 1; 2921 int push_slots = count + (odd ? 1 : 0); 2922 2923 // Always pushing full 128 bit registers. 2924 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2925 words_pushed += 2; 2926 2927 for (int i = 2; i + 1 < count; i += 2) { 2928 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2929 words_pushed += 2; 2930 } 2931 2932 if (odd) { 2933 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2934 words_pushed++; 2935 } 2936 2937 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2938 return count * 2; 2939 } 2940 2941 if (mode == PushPopFp) { 2942 bool odd = (count & 1) == 1; 2943 int push_slots = count + (odd ? 
1 : 0); 2944 2945 if (count == 1) { 2946 // Stack pointer must be 16 bytes aligned 2947 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2948 return 1; 2949 } 2950 2951 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2952 words_pushed += 2; 2953 2954 for (int i = 2; i + 1 < count; i += 2) { 2955 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2956 words_pushed += 2; 2957 } 2958 2959 if (odd) { 2960 // Stack pointer must be 16 bytes aligned 2961 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2962 words_pushed++; 2963 } 2964 2965 assert(words_pushed == count, "oops, pushed != count"); 2966 2967 return count; 2968 } 2969 2970 return 0; 2971 } 2972 2973 // Return the number of dwords popped 2974 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2975 int words_pushed = 0; 2976 bool use_sve = false; 2977 int sve_vector_size_in_bytes = 0; 2978 2979 #ifdef COMPILER2 2980 use_sve = Matcher::supports_scalable_vector(); 2981 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2982 #endif 2983 // Scan bitset to accumulate register pairs 2984 unsigned char regs[32]; 2985 int count = 0; 2986 for (int reg = 0; reg <= 31; reg++) { 2987 if (1 & bitset) 2988 regs[count++] = reg; 2989 bitset >>= 1; 2990 } 2991 2992 if (count == 0) { 2993 return 0; 2994 } 2995 2996 if (mode == PushPopFull) { 2997 if (use_sve && sve_vector_size_in_bytes > 16) { 2998 mode = PushPopSVE; 2999 } else { 3000 mode = PushPopNeon; 3001 } 3002 } 3003 3004 #ifndef PRODUCT 3005 { 3006 char buffer[48]; 3007 if (mode == PushPopSVE) { 3008 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 3009 } else if (mode == PushPopNeon) { 3010 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 3011 } else { 3012 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 3013 } 3014 block_comment(buffer); 3015 } 3016 #endif 3017 3018 if (mode == PushPopSVE) { 3019 for (int i = count - 1; i >= 0; i--) { 3020 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 3021 } 3022 add(stack, stack, sve_vector_size_in_bytes * count); 3023 return count * sve_vector_size_in_bytes / 8; 3024 } 3025 3026 if (mode == PushPopNeon) { 3027 if (count == 1) { 3028 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 3029 return 2; 3030 } 3031 3032 bool odd = (count & 1) == 1; 3033 int push_slots = count + (odd ? 1 : 0); 3034 3035 if (odd) { 3036 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 3037 words_pushed++; 3038 } 3039 3040 for (int i = 2; i + 1 < count; i += 2) { 3041 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 3042 words_pushed += 2; 3043 } 3044 3045 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 3046 words_pushed += 2; 3047 3048 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 3049 3050 return count * 2; 3051 } 3052 3053 if (mode == PushPopFp) { 3054 bool odd = (count & 1) == 1; 3055 int push_slots = count + (odd ? 
1 : 0); 3056 3057 if (count == 1) { 3058 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 3059 return 1; 3060 } 3061 3062 if (odd) { 3063 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 3064 words_pushed++; 3065 } 3066 3067 for (int i = 2; i + 1 < count; i += 2) { 3068 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 3069 words_pushed += 2; 3070 } 3071 3072 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 3073 words_pushed += 2; 3074 3075 assert(words_pushed == count, "oops, pushed != count"); 3076 3077 return count; 3078 } 3079 3080 return 0; 3081 } 3082 3083 // Return the number of dwords pushed 3084 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 3085 bool use_sve = false; 3086 int sve_predicate_size_in_slots = 0; 3087 3088 #ifdef COMPILER2 3089 use_sve = Matcher::supports_scalable_vector(); 3090 if (use_sve) { 3091 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3092 } 3093 #endif 3094 3095 if (!use_sve) { 3096 return 0; 3097 } 3098 3099 unsigned char regs[PRegister::number_of_registers]; 3100 int count = 0; 3101 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3102 if (1 & bitset) 3103 regs[count++] = reg; 3104 bitset >>= 1; 3105 } 3106 3107 if (count == 0) { 3108 return 0; 3109 } 3110 3111 int total_push_bytes = align_up(sve_predicate_size_in_slots * 3112 VMRegImpl::stack_slot_size * count, 16); 3113 sub(stack, stack, total_push_bytes); 3114 for (int i = 0; i < count; i++) { 3115 sve_str(as_PRegister(regs[i]), Address(stack, i)); 3116 } 3117 return total_push_bytes / 8; 3118 } 3119 3120 // Return the number of dwords popped 3121 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 3122 bool use_sve = false; 3123 int sve_predicate_size_in_slots = 0; 3124 3125 #ifdef COMPILER2 3126 use_sve = Matcher::supports_scalable_vector(); 3127 if (use_sve) { 3128 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3129 } 3130 #endif 3131 3132 if (!use_sve) { 3133 return 0; 3134 } 3135 3136 unsigned char regs[PRegister::number_of_registers]; 3137 int count = 0; 3138 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3139 if (1 & bitset) 3140 regs[count++] = reg; 3141 bitset >>= 1; 3142 } 3143 3144 if (count == 0) { 3145 return 0; 3146 } 3147 3148 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 3149 VMRegImpl::stack_slot_size * count, 16); 3150 for (int i = count - 1; i >= 0; i--) { 3151 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 3152 } 3153 add(stack, stack, total_pop_bytes); 3154 return total_pop_bytes / 8; 3155 } 3156 3157 #ifdef ASSERT 3158 void MacroAssembler::verify_heapbase(const char* msg) { 3159 #if 0 3160 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 3161 assert (Universe::heap() != nullptr, "java heap should be initialized"); 3162 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 3163 // rheapbase is allocated as general register 3164 return; 3165 } 3166 if (CheckCompressedOops) { 3167 Label ok; 3168 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 3169 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3170 br(Assembler::EQ, ok); 3171 stop(msg); 3172 bind(ok); 3173 pop(1 << rscratch1->encoding(), sp); 3174 } 3175 #endif 3176 } 3177 #endif 3178 3179 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 
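// A jobject is a tagged pointer: local handles are untagged, weak
// global handles carry TypeTag::weak_global (0b01) and global handles
// carry TypeTag::global (0b10) in the low bits (see the STATIC_ASSERTs
// below). The code classifies the handle by its tag bits, removes the
// tag, and loads the oop with the matching access decorators.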
3180 assert_different_registers(value, tmp1, tmp2); 3181 Label done, tagged, weak_tagged; 3182 3183 cbz(value, done); // Use null as-is. 3184 tst(value, JNIHandles::tag_mask); // Test for tag. 3185 br(Assembler::NE, tagged); 3186 3187 // Resolve local handle 3188 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3189 verify_oop(value); 3190 b(done); 3191 3192 bind(tagged); 3193 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3194 tbnz(value, 0, weak_tagged); // Test for weak tag. 3195 3196 // Resolve global handle 3197 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3198 verify_oop(value); 3199 b(done); 3200 3201 bind(weak_tagged); 3202 // Resolve jweak. 3203 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3204 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3205 verify_oop(value); 3206 3207 bind(done); 3208 } 3209 3210 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3211 assert_different_registers(value, tmp1, tmp2); 3212 Label done; 3213 3214 cbz(value, done); // Use null as-is. 3215 3216 #ifdef ASSERT 3217 { 3218 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3219 Label valid_global_tag; 3220 tbnz(value, 1, valid_global_tag); // Test for global tag 3221 stop("non global jobject using resolve_global_jobject"); 3222 bind(valid_global_tag); 3223 } 3224 #endif 3225 3226 // Resolve global handle 3227 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3228 verify_oop(value); 3229 3230 bind(done); 3231 } 3232 3233 void MacroAssembler::stop(const char* msg) { 3234 BLOCK_COMMENT(msg); 3235 dcps1(0xdeae); 3236 emit_int64((uintptr_t)msg); 3237 } 3238 3239 void MacroAssembler::unimplemented(const char* what) { 3240 const char* buf = nullptr; 3241 { 3242 ResourceMark rm; 3243 stringStream ss; 3244 ss.print("unimplemented: %s", what); 3245 buf = code_string(ss.as_string()); 3246 } 3247 stop(buf); 3248 } 3249 3250 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3251 #ifdef ASSERT 3252 Label OK; 3253 br(cc, OK); 3254 stop(msg); 3255 bind(OK); 3256 #endif 3257 } 3258 3259 // If a constant does not fit in an immediate field, generate some 3260 // number of MOV instructions and then perform the operation. 3261 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3262 add_sub_imm_insn insn1, 3263 add_sub_reg_insn insn2, 3264 bool is32) { 3265 assert(Rd != zr, "Rd = zr and not setting flags?"); 3266 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3267 if (fits) { 3268 (this->*insn1)(Rd, Rn, imm); 3269 } else { 3270 if (uabs(imm) < (1 << 24)) { 3271 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3272 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3273 } else { 3274 assert_different_registers(Rd, Rn); 3275 mov(Rd, imm); 3276 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3277 } 3278 } 3279 } 3280 3281 // Separate vsn which sets the flags. Optimisations are more restricted 3282 // because we must set the flags correctly. 3283 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3284 add_sub_imm_insn insn1, 3285 add_sub_reg_insn insn2, 3286 bool is32) { 3287 bool fits = operand_valid_for_add_sub_immediate(is32 ? 
(int32_t)imm : imm); 3288 if (fits) { 3289 (this->*insn1)(Rd, Rn, imm); 3290 } else { 3291 assert_different_registers(Rd, Rn); 3292 assert(Rd != zr, "overflow in immediate operand"); 3293 mov(Rd, imm); 3294 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3295 } 3296 } 3297 3298 3299 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3300 if (increment.is_register()) { 3301 add(Rd, Rn, increment.as_register()); 3302 } else { 3303 add(Rd, Rn, increment.as_constant()); 3304 } 3305 } 3306 3307 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3308 if (increment.is_register()) { 3309 addw(Rd, Rn, increment.as_register()); 3310 } else { 3311 addw(Rd, Rn, increment.as_constant()); 3312 } 3313 } 3314 3315 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3316 if (decrement.is_register()) { 3317 sub(Rd, Rn, decrement.as_register()); 3318 } else { 3319 sub(Rd, Rn, decrement.as_constant()); 3320 } 3321 } 3322 3323 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3324 if (decrement.is_register()) { 3325 subw(Rd, Rn, decrement.as_register()); 3326 } else { 3327 subw(Rd, Rn, decrement.as_constant()); 3328 } 3329 } 3330 3331 void MacroAssembler::reinit_heapbase() 3332 { 3333 if (UseCompressedOops) { 3334 if (Universe::is_fully_initialized()) { 3335 mov(rheapbase, CompressedOops::base()); 3336 } else { 3337 lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3338 ldr(rheapbase, Address(rheapbase)); 3339 } 3340 } 3341 } 3342 3343 // this simulates the behaviour of the x86 cmpxchg instruction using a 3344 // load linked/store conditional pair. we use the acquire/release 3345 // versions of these instructions so that we flush pending writes as 3346 // per Java semantics. 3347 3348 // n.b the x86 version assumes the old value to be compared against is 3349 // in rax and updates rax with the value located in memory if the 3350 // cmpxchg fails. we supply a register for the old value explicitly 3351 3352 // the aarch64 load linked/store conditional instructions do not 3353 // accept an offset. so, unlike x86, we must provide a plain register 3354 // to identify the memory word to be compared/exchanged rather than a 3355 // register+offset Address. 3356 3357 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3358 Label &succeed, Label *fail) { 3359 // oldv holds comparison value 3360 // newv holds value to write in exchange 3361 // addr identifies memory word to compare against/update 3362 if (UseLSE) { 3363 mov(tmp, oldv); 3364 casal(Assembler::xword, oldv, newv, addr); 3365 cmp(tmp, oldv); 3366 br(Assembler::EQ, succeed); 3367 membar(AnyAny); 3368 } else { 3369 Label retry_load, nope; 3370 prfm(Address(addr), PSTL1STRM); 3371 bind(retry_load); 3372 // flush and load exclusive from the memory location 3373 // and fail if it is not what we expect 3374 ldaxr(tmp, addr); 3375 cmp(tmp, oldv); 3376 br(Assembler::NE, nope); 3377 // if we store+flush with no intervening write tmp will be zero 3378 stlxr(tmp, newv, addr); 3379 cbzw(tmp, succeed); 3380 // retry so we only ever return after a load fails to compare 3381 // ensures we don't return a stale value after a failed write. 
3382 b(retry_load); 3383 // if the memory word differs we return it in oldv and signal a fail 3384 bind(nope); 3385 membar(AnyAny); 3386 mov(oldv, tmp); 3387 } 3388 if (fail) 3389 b(*fail); 3390 } 3391 3392 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 3393 Label &succeed, Label *fail) { 3394 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 3395 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 3396 } 3397 3398 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 3399 Label &succeed, Label *fail) { 3400 // oldv holds comparison value 3401 // newv holds value to write in exchange 3402 // addr identifies memory word to compare against/update 3403 // tmp returns 0/1 for success/failure 3404 if (UseLSE) { 3405 mov(tmp, oldv); 3406 casal(Assembler::word, oldv, newv, addr); 3407 cmp(tmp, oldv); 3408 br(Assembler::EQ, succeed); 3409 membar(AnyAny); 3410 } else { 3411 Label retry_load, nope; 3412 prfm(Address(addr), PSTL1STRM); 3413 bind(retry_load); 3414 // flush and load exclusive from the memory location 3415 // and fail if it is not what we expect 3416 ldaxrw(tmp, addr); 3417 cmp(tmp, oldv); 3418 br(Assembler::NE, nope); 3419 // if we store+flush with no intervening write tmp will be zero 3420 stlxrw(tmp, newv, addr); 3421 cbzw(tmp, succeed); 3422 // retry so we only ever return after a load fails to compare 3423 // ensures we don't return a stale value after a failed write. 3424 b(retry_load); 3425 // if the memory word differs we return it in oldv and signal a fail 3426 bind(nope); 3427 membar(AnyAny); 3428 mov(oldv, tmp); 3429 } 3430 if (fail) 3431 b(*fail); 3432 } 3433 3434 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3435 // doesn't retry and may fail spuriously. If the oldval is wanted, 3436 // Pass a register for the result, otherwise pass noreg. 3437 3438 // Clobbers rscratch1 3439 void MacroAssembler::cmpxchg(Register addr, Register expected, 3440 Register new_val, 3441 enum operand_size size, 3442 bool acquire, bool release, 3443 bool weak, 3444 Register result) { 3445 if (result == noreg) result = rscratch1; 3446 BLOCK_COMMENT("cmpxchg {"); 3447 if (UseLSE) { 3448 mov(result, expected); 3449 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3450 compare_eq(result, expected, size); 3451 #ifdef ASSERT 3452 // Poison rscratch1 which is written on !UseLSE branch 3453 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3454 #endif 3455 } else { 3456 Label retry_load, done; 3457 prfm(Address(addr), PSTL1STRM); 3458 bind(retry_load); 3459 load_exclusive(result, addr, size, acquire); 3460 compare_eq(result, expected, size); 3461 br(Assembler::NE, done); 3462 store_exclusive(rscratch1, new_val, addr, size, release); 3463 if (weak) { 3464 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3465 } else { 3466 cbnzw(rscratch1, retry_load); 3467 } 3468 bind(done); 3469 } 3470 BLOCK_COMMENT("} cmpxchg"); 3471 } 3472 3473 // A generic comparison. Only compares for equality, clobbers rscratch1. 
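// For the sub-word sizes there is no cmp variant, so equality is derived
// roughly as follows (illustrative sketch of the halfword case):
//
//   eorw rscratch1, rm, rn        // rscratch1 = rm ^ rn
//   ands zr, rscratch1, 0xffff    // Z is set iff the low 16 bits match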
3474 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3475 if (size == xword) { 3476 cmp(rm, rn); 3477 } else if (size == word) { 3478 cmpw(rm, rn); 3479 } else if (size == halfword) { 3480 eorw(rscratch1, rm, rn); 3481 ands(zr, rscratch1, 0xffff); 3482 } else if (size == byte) { 3483 eorw(rscratch1, rm, rn); 3484 ands(zr, rscratch1, 0xff); 3485 } else { 3486 ShouldNotReachHere(); 3487 } 3488 } 3489 3490 3491 static bool different(Register a, RegisterOrConstant b, Register c) { 3492 if (b.is_constant()) 3493 return a != c; 3494 else 3495 return a != b.as_register() && a != c && b.as_register() != c; 3496 } 3497 3498 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3499 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3500 if (UseLSE) { \ 3501 prev = prev->is_valid() ? prev : zr; \ 3502 if (incr.is_register()) { \ 3503 AOP(sz, incr.as_register(), prev, addr); \ 3504 } else { \ 3505 mov(rscratch2, incr.as_constant()); \ 3506 AOP(sz, rscratch2, prev, addr); \ 3507 } \ 3508 return; \ 3509 } \ 3510 Register result = rscratch2; \ 3511 if (prev->is_valid()) \ 3512 result = different(prev, incr, addr) ? prev : rscratch2; \ 3513 \ 3514 Label retry_load; \ 3515 prfm(Address(addr), PSTL1STRM); \ 3516 bind(retry_load); \ 3517 LDXR(result, addr); \ 3518 OP(rscratch1, result, incr); \ 3519 STXR(rscratch2, rscratch1, addr); \ 3520 cbnzw(rscratch2, retry_load); \ 3521 if (prev->is_valid() && prev != result) { \ 3522 IOP(prev, rscratch1, incr); \ 3523 } \ 3524 } 3525 3526 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3527 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3528 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3529 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3530 3531 #undef ATOMIC_OP 3532 3533 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3534 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3535 if (UseLSE) { \ 3536 prev = prev->is_valid() ? prev : zr; \ 3537 AOP(sz, newv, prev, addr); \ 3538 return; \ 3539 } \ 3540 Register result = rscratch2; \ 3541 if (prev->is_valid()) \ 3542 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 3543 \ 3544 Label retry_load; \ 3545 prfm(Address(addr), PSTL1STRM); \ 3546 bind(retry_load); \ 3547 LDXR(result, addr); \ 3548 STXR(rscratch1, newv, addr); \ 3549 cbnzw(rscratch1, retry_load); \ 3550 if (prev->is_valid() && prev != result) \ 3551 mov(prev, result); \ 3552 } 3553 3554 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) 3555 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) 3556 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) 3557 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) 3558 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) 3559 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) 3560 3561 #undef ATOMIC_XCHG 3562 3563 #ifndef PRODUCT 3564 extern "C" void findpc(intptr_t x); 3565 #endif 3566 3567 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 3568 { 3569 // In order to get locks to work, we need to fake a in_VM state 3570 if (ShowMessageBoxOnError ) { 3571 JavaThread* thread = JavaThread::current(); 3572 JavaThreadState saved_state = thread->thread_state(); 3573 thread->set_thread_state(_thread_in_vm); 3574 #ifndef PRODUCT 3575 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 3576 ttyLocker ttyl; 3577 BytecodeCounter::print(); 3578 } 3579 #endif 3580 if (os::message_box(msg, "Execution stopped, print registers?")) { 3581 ttyLocker ttyl; 3582 tty->print_cr(" pc = 0x%016" PRIx64, pc); 3583 #ifndef PRODUCT 3584 tty->cr(); 3585 findpc(pc); 3586 tty->cr(); 3587 #endif 3588 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]); 3589 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]); 3590 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]); 3591 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]); 3592 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]); 3593 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]); 3594 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]); 3595 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]); 3596 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]); 3597 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]); 3598 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]); 3599 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]); 3600 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]); 3601 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]); 3602 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]); 3603 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]); 3604 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]); 3605 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]); 3606 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]); 3607 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]); 3608 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]); 3609 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]); 3610 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]); 3611 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]); 3612 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]); 3613 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]); 3614 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]); 3615 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]); 3616 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]); 3617 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]); 3618 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]); 3619 BREAKPOINT; 3620 } 3621 } 3622 fatal("DEBUG MESSAGE: %s", msg); 3623 } 3624 3625 RegSet MacroAssembler::call_clobbered_gp_registers() { 3626 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2); 3627 #ifndef R18_RESERVED 3628 regs += r18_tls; 3629 #endif 3630 return regs; 3631 } 3632 3633 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { 3634 int step = 4 * 
wordSize; 3635 push(call_clobbered_gp_registers() - exclude, sp); 3636 sub(sp, sp, step); 3637 mov(rscratch1, -step); 3638 // Push v0-v7, v16-v31. 3639 for (int i = 31; i>= 4; i -= 4) { 3640 if (i <= v7->encoding() || i >= v16->encoding()) 3641 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3642 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3643 } 3644 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3645 as_FloatRegister(3), T1D, Address(sp)); 3646 } 3647 3648 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3649 for (int i = 0; i < 32; i += 4) { 3650 if (i <= v7->encoding() || i >= v16->encoding()) 3651 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3652 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3653 } 3654 3655 reinitialize_ptrue(); 3656 3657 pop(call_clobbered_gp_registers() - exclude, sp); 3658 } 3659 3660 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3661 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3662 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3663 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3664 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3665 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3666 sve_str(as_FloatRegister(i), Address(sp, i)); 3667 } 3668 } else { 3669 int step = (save_vectors ? 8 : 4) * wordSize; 3670 mov(rscratch1, -step); 3671 sub(sp, sp, step); 3672 for (int i = 28; i >= 4; i -= 4) { 3673 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3674 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3675 } 3676 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3677 } 3678 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3679 sub(sp, sp, total_predicate_in_bytes); 3680 for (int i = 0; i < PRegister::number_of_registers; i++) { 3681 sve_str(as_PRegister(i), Address(sp, i)); 3682 } 3683 } 3684 } 3685 3686 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3687 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3688 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3689 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3690 sve_ldr(as_PRegister(i), Address(sp, i)); 3691 } 3692 add(sp, sp, total_predicate_in_bytes); 3693 } 3694 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3695 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3696 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3697 } 3698 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3699 } else { 3700 int step = (restore_vectors ? 8 : 4) * wordSize; 3701 for (int i = 0; i <= 28; i += 4) 3702 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3703 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3704 } 3705 3706 // We may use predicate registers and rely on ptrue with SVE, 3707 // regardless of wide vector (> 8 bytes) used or not. 3708 if (use_sve) { 3709 reinitialize_ptrue(); 3710 } 3711 3712 // integer registers except lr & sp 3713 pop(RegSet::range(r0, r17), sp); 3714 #ifdef R18_RESERVED 3715 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3716 pop(RegSet::range(r20, r29), sp); 3717 #else 3718 pop(RegSet::range(r18_tls, r29), sp); 3719 #endif 3720 } 3721 3722 /** 3723 * Helpers for multiply_to_len(). 
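 *
 * add2_with_carry below computes, in effect (a 128-bit sketch, where
 * dest_hi:dest_lo is a 128-bit accumulator and the resulting high word
 * lands in final_dest_hi):
 *
 *   huge_128 t = ((huge_128)dest_hi << 64 | dest_lo) + src1 + src2;
 *   dest_lo       = (jlong)t;
 *   final_dest_hi = (jlong)(t >>> 64);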
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset). "size" is the
// size of the operand. The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1u<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}

Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
  assert(offset >= 0, "spill to negative address?");

  Register base = sp;

  // An immediate offset in the range 0 to 255 which is multiplied
  // by the current vector or predicate register size in bytes.
  if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
    return Address(base, offset / sve_reg_size_in_bytes);
  }

  add(tmp, base, offset);
  return Address(tmp);
}

// Checks whether the given offset is aligned.
// Returns true if it is, else false.
bool MacroAssembler::merge_alignment_check(Register base,
                                           size_t size,
                                           int64_t cur_offset,
                                           int64_t prev_offset) const {
  if (AvoidUnalignedAccesses) {
    if (base == sp) {
      // Checks whether the lower offset is aligned to a pair of registers.
      int64_t pair_mask = size * 2 - 1;
      int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
      return (offset & pair_mask) == 0;
    } else { // If base is not sp, we can't guarantee the access is aligned.
      return false;
    }
  } else {
    int64_t mask = size - 1;
    // Load/store pair instructions only support element-size-aligned offsets.
    return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
  }
}

// Checks whether the current and previous loads/stores can be merged.
// Returns true if they can be merged, else false.
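// For example, under the checks below the adjacent pair
//   ldr x2, [x10, #8]
//   ldr x3, [x10, #16]
// can be rewritten as the single "ldp x2, x3, [x10, #8]": same base and
// element size, offsets exactly one element apart, and within the
// ldp/stp immediate range.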
bool MacroAssembler::ldst_can_merge(Register rt,
                                    const Address &adr,
                                    size_t cur_size_in_bytes,
                                    bool is_store) const {
  address prev = pc() - NativeInstruction::instruction_size;
  address last = code()->last_insn();

  if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
    return false;
  }

  if (adr.getMode() != Address::base_plus_offset || prev != last) {
    return false;
  }

  NativeLdSt* prev_ldst = NativeLdSt_at(prev);
  size_t prev_size_in_bytes = prev_ldst->size_in_bytes();

  assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
  assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");

  if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
    return false;
  }

  int64_t max_offset = 63 * prev_size_in_bytes;
  int64_t min_offset = -64 * prev_size_in_bytes;

  assert(prev_ldst->is_not_pre_post_index(), "merging pre-index or post-index accesses is not supported.");

  // Only accesses with the same base register can be merged.
  if (adr.base() != prev_ldst->base()) {
    return false;
  }

  int64_t cur_offset = adr.offset();
  int64_t prev_offset = prev_ldst->offset();
  size_t diff = abs(cur_offset - prev_offset);
  if (diff != prev_size_in_bytes) {
    return false;
  }

  // The following cases cannot be merged:
  //   ldr x2, [x2, #8]
  //   ldr x3, [x2, #16]
  // or:
  //   ldr x2, [x3, #8]
  //   ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get a SIGILL.
  if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
    return false;
  }

  int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
  // The lower offset must be within the ldp/stp instruction's immediate range.
  if (low_offset > max_offset || low_offset < min_offset) {
    return false;
  }

  if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
    return true;
  }

  return false;
}

// Merge the current load/store with the previous load/store into an ldp/stp.
void MacroAssembler::merge_ldst(Register rt,
                                const Address &adr,
                                size_t cur_size_in_bytes,
                                bool is_store) {

  assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");

  Register rt_low, rt_high;
  address prev = pc() - NativeInstruction::instruction_size;
  NativeLdSt* prev_ldst = NativeLdSt_at(prev);

  int64_t offset;

  if (adr.offset() < prev_ldst->offset()) {
    offset = adr.offset();
    rt_low = rt;
    rt_high = prev_ldst->target();
  } else {
    offset = prev_ldst->offset();
    rt_low = prev_ldst->target();
    rt_high = rt;
  }

  Address adr_p = Address(prev_ldst->base(), offset);
  // Overwrite the previously generated binary.
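  // n.b. the current access has not been emitted yet; rewinding the code
  // section's end by one instruction drops the previous ldr/str, so the
  // single ldp/stp emitted below stands in for both accesses.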
  code_section()->set_end(prev);

  const size_t sz = prev_ldst->size_in_bytes();
  assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
  if (!is_store) {
    BLOCK_COMMENT("merged ldr pair");
    if (sz == 8) {
      ldp(rt_low, rt_high, adr_p);
    } else {
      ldpw(rt_low, rt_high, adr_p);
    }
  } else {
    BLOCK_COMMENT("merged str pair");
    if (sz == 8) {
      stp(rt_low, rt_high, adr_p);
    } else {
      stpw(rt_low, rt_high, adr_p);
    }
  }
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions. Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);         // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
3983 * 3984 */ 3985 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3986 Register carry, Register carry2, 3987 Register idx, Register jdx, 3988 Register yz_idx1, Register yz_idx2, 3989 Register tmp, Register tmp3, Register tmp4, 3990 Register tmp6, Register product_hi) { 3991 3992 // jlong carry, x[], y[], z[]; 3993 // int kdx = ystart+1; 3994 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3995 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3996 // jlong carry2 = (jlong)(tmp3 >>> 64); 3997 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3998 // carry = (jlong)(tmp4 >>> 64); 3999 // z[kdx+idx+1] = (jlong)tmp3; 4000 // z[kdx+idx] = (jlong)tmp4; 4001 // } 4002 // idx += 2; 4003 // if (idx > 0) { 4004 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 4005 // z[kdx+idx] = (jlong)yz_idx1; 4006 // carry = (jlong)(yz_idx1 >>> 64); 4007 // } 4008 // 4009 4010 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 4011 4012 lsrw(jdx, idx, 2); 4013 4014 bind(L_third_loop); 4015 4016 subsw(jdx, jdx, 1); 4017 br(Assembler::MI, L_third_loop_exit); 4018 subw(idx, idx, 4); 4019 4020 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4021 4022 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 4023 4024 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4025 4026 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 4027 ror(yz_idx2, yz_idx2, 32); 4028 4029 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 4030 4031 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4032 umulh(tmp4, product_hi, yz_idx1); 4033 4034 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 4035 ror(rscratch2, rscratch2, 32); 4036 4037 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 4038 umulh(carry2, product_hi, yz_idx2); 4039 4040 // propagate sum of both multiplications into carry:tmp4:tmp3 4041 adds(tmp3, tmp3, carry); 4042 adc(tmp4, tmp4, zr); 4043 adds(tmp3, tmp3, rscratch1); 4044 adcs(tmp4, tmp4, tmp); 4045 adc(carry, carry2, zr); 4046 adds(tmp4, tmp4, rscratch2); 4047 adc(carry, carry, zr); 4048 4049 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 4050 ror(tmp4, tmp4, 32); 4051 stp(tmp4, tmp3, Address(tmp6, 0)); 4052 4053 b(L_third_loop); 4054 bind (L_third_loop_exit); 4055 4056 andw (idx, idx, 0x3); 4057 cbz(idx, L_post_third_loop_done); 4058 4059 Label L_check_1; 4060 subsw(idx, idx, 2); 4061 br(Assembler::MI, L_check_1); 4062 4063 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4064 ldr(yz_idx1, Address(rscratch1, 0)); 4065 ror(yz_idx1, yz_idx1, 32); 4066 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4067 umulh(tmp4, product_hi, yz_idx1); 4068 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4069 ldr(yz_idx2, Address(rscratch1, 0)); 4070 ror(yz_idx2, yz_idx2, 32); 4071 4072 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 4073 4074 ror(tmp3, tmp3, 32); 4075 str(tmp3, Address(rscratch1, 0)); 4076 4077 bind (L_check_1); 4078 4079 andw (idx, idx, 0x1); 4080 subsw(idx, idx, 1); 4081 br(Assembler::MI, L_post_third_loop_done); 4082 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4083 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 4084 umulh(carry2, tmp4, product_hi); 4085 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4086 4087 add2_with_carry(carry2, tmp3, tmp4, carry); 4088 4089 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4090 
  // carry = (carry2:tmp3) >> 32, i.e. the 64-bit value (carry2 << 32) | (tmp3 >>> 32)
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: tmp0
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = tmp0;

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);       // idx = ylen;
  addw(kdx, xlen, ylen); // kdx = xlen+ylen;
  mov(carry, zr);        // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
4170 // 4171 // for (int i = xstart-1; i >= 0; i--) { // Second loop 4172 // carry = 0; 4173 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 4174 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 4175 // (z[k] & LONG_MASK) + carry; 4176 // z[k] = (int)product; 4177 // carry = product >>> 32; 4178 // } 4179 // z[i] = (int)carry; 4180 // } 4181 // 4182 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 4183 4184 const Register jdx = tmp1; 4185 4186 bind(L_second_loop); 4187 mov(carry, zr); // carry = 0; 4188 movw(jdx, ylen); // j = ystart+1 4189 4190 subsw(xstart, xstart, 1); // i = xstart-1; 4191 br(Assembler::MI, L_done); 4192 4193 str(z, Address(pre(sp, -4 * wordSize))); 4194 4195 Label L_last_x; 4196 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4197 subsw(xstart, xstart, 1); // i = xstart-1; 4198 br(Assembler::MI, L_last_x); 4199 4200 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4201 ldr(product_hi, Address(rscratch1)); 4202 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4203 4204 Label L_third_loop_prologue; 4205 bind(L_third_loop_prologue); 4206 4207 str(ylen, Address(sp, wordSize)); 4208 stp(x, xstart, Address(sp, 2 * wordSize)); 4209 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4210 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4211 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4212 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4213 4214 addw(tmp3, xlen, 1); 4215 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4216 subsw(tmp3, tmp3, 1); 4217 br(Assembler::MI, L_done); 4218 4219 lsr(carry, carry, 32); 4220 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4221 b(L_second_loop); 4222 4223 // Next infrequent code is moved outside loops. 4224 bind(L_last_x); 4225 ldrw(product_hi, Address(x, 0)); 4226 b(L_third_loop_prologue); 4227 4228 bind(L_done); 4229 } 4230 4231 // Code for BigInteger::mulAdd intrinsic 4232 // out = r0 4233 // in = r1 4234 // offset = r2 (already out.length-offset) 4235 // len = r3 4236 // k = r4 4237 // 4238 // pseudo code from java implementation: 4239 // carry = 0; 4240 // offset = out.length-offset - 1; 4241 // for (int j=len-1; j >= 0; j--) { 4242 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4243 // out[offset--] = (int)product; 4244 // carry = product >>> 32; 4245 // } 4246 // return (int)carry; 4247 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4248 Register len, Register k) { 4249 Label LOOP, END; 4250 // pre-loop 4251 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4252 csel(out, zr, out, Assembler::EQ); 4253 br(Assembler::EQ, END); 4254 add(in, in, len, LSL, 2); // in[j+1] address 4255 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4256 mov(out, zr); // used to keep carry now 4257 BIND(LOOP); 4258 ldrw(rscratch1, Address(pre(in, -4))); 4259 madd(rscratch1, rscratch1, k, out); 4260 ldrw(rscratch2, Address(pre(offset, -4))); 4261 add(rscratch1, rscratch1, rscratch2); 4262 strw(rscratch1, Address(offset)); 4263 lsr(out, rscratch1, 32); 4264 subs(len, len, 1); 4265 br(Assembler::NE, LOOP); 4266 BIND(END); 4267 } 4268 4269 /** 4270 * Emits code to update CRC-32 with a byte value according to constants in table 4271 * 4272 * @param [in,out]crc Register containing the crc. 
4273 * @param [in]val Register containing the byte to fold into the CRC. 4274 * @param [in]table Register containing the table of crc constants. 4275 * 4276 * uint32_t crc; 4277 * val = crc_table[(val ^ crc) & 0xFF]; 4278 * crc = val ^ (crc >> 8); 4279 * 4280 */ 4281 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4282 eor(val, val, crc); 4283 andr(val, val, 0xff); 4284 ldrw(val, Address(table, val, Address::lsl(2))); 4285 eor(crc, val, crc, Assembler::LSR, 8); 4286 } 4287 4288 /** 4289 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4290 * 4291 * @param [in,out]crc Register containing the crc. 4292 * @param [in]v Register containing the 32-bit to fold into the CRC. 4293 * @param [in]table0 Register containing table 0 of crc constants. 4294 * @param [in]table1 Register containing table 1 of crc constants. 4295 * @param [in]table2 Register containing table 2 of crc constants. 4296 * @param [in]table3 Register containing table 3 of crc constants. 4297 * 4298 * uint32_t crc; 4299 * v = crc ^ v 4300 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4301 * 4302 */ 4303 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4304 Register table0, Register table1, Register table2, Register table3, 4305 bool upper) { 4306 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4307 uxtb(tmp, v); 4308 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4309 ubfx(tmp, v, 8, 8); 4310 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4311 eor(crc, crc, tmp); 4312 ubfx(tmp, v, 16, 8); 4313 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4314 eor(crc, crc, tmp); 4315 ubfx(tmp, v, 24, 8); 4316 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4317 eor(crc, crc, tmp); 4318 } 4319 4320 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4321 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4322 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4323 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4324 4325 subs(tmp0, len, 384); 4326 mvnw(crc, crc); 4327 br(Assembler::GE, CRC_by128_pre); 4328 BIND(CRC_less128); 4329 subs(len, len, 32); 4330 br(Assembler::GE, CRC_by32_loop); 4331 BIND(CRC_less32); 4332 adds(len, len, 32 - 4); 4333 br(Assembler::GE, CRC_by4_loop); 4334 adds(len, len, 4); 4335 br(Assembler::GT, CRC_by1_loop); 4336 b(L_exit); 4337 4338 BIND(CRC_by32_loop); 4339 ldp(tmp0, tmp1, Address(buf)); 4340 crc32x(crc, crc, tmp0); 4341 ldp(tmp2, tmp3, Address(buf, 16)); 4342 crc32x(crc, crc, tmp1); 4343 add(buf, buf, 32); 4344 crc32x(crc, crc, tmp2); 4345 subs(len, len, 32); 4346 crc32x(crc, crc, tmp3); 4347 br(Assembler::GE, CRC_by32_loop); 4348 cmn(len, (u1)32); 4349 br(Assembler::NE, CRC_less32); 4350 b(L_exit); 4351 4352 BIND(CRC_by4_loop); 4353 ldrw(tmp0, Address(post(buf, 4))); 4354 subs(len, len, 4); 4355 crc32w(crc, crc, tmp0); 4356 br(Assembler::GE, CRC_by4_loop); 4357 adds(len, len, 4); 4358 br(Assembler::LE, L_exit); 4359 BIND(CRC_by1_loop); 4360 ldrb(tmp0, Address(post(buf, 1))); 4361 subs(len, len, 1); 4362 crc32b(crc, crc, tmp0); 4363 br(Assembler::GT, CRC_by1_loop); 4364 b(L_exit); 4365 4366 BIND(CRC_by128_pre); 4367 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4368 4*256*sizeof(juint) + 8*sizeof(juint)); 4369 mov(crc, 0); 4370 crc32x(crc, crc, tmp0); 4371 crc32x(crc, crc, tmp1); 4372 4373 cbnz(len, CRC_less128); 4374 4375 BIND(L_exit); 
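  // Undo the initial bit-flip: this kernel operates on the one's complement
  // of the CRC (see the mvnw at entry), so flip back before returning.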
4376 mvnw(crc, crc); 4377 } 4378 4379 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4380 Register len, Register tmp0, Register tmp1, Register tmp2, 4381 Register tmp3) { 4382 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4383 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4384 4385 mvnw(crc, crc); 4386 4387 subs(len, len, 128); 4388 br(Assembler::GE, CRC_by64_pre); 4389 BIND(CRC_less64); 4390 adds(len, len, 128-32); 4391 br(Assembler::GE, CRC_by32_loop); 4392 BIND(CRC_less32); 4393 adds(len, len, 32-4); 4394 br(Assembler::GE, CRC_by4_loop); 4395 adds(len, len, 4); 4396 br(Assembler::GT, CRC_by1_loop); 4397 b(L_exit); 4398 4399 BIND(CRC_by32_loop); 4400 ldp(tmp0, tmp1, Address(post(buf, 16))); 4401 subs(len, len, 32); 4402 crc32x(crc, crc, tmp0); 4403 ldr(tmp2, Address(post(buf, 8))); 4404 crc32x(crc, crc, tmp1); 4405 ldr(tmp3, Address(post(buf, 8))); 4406 crc32x(crc, crc, tmp2); 4407 crc32x(crc, crc, tmp3); 4408 br(Assembler::GE, CRC_by32_loop); 4409 cmn(len, (u1)32); 4410 br(Assembler::NE, CRC_less32); 4411 b(L_exit); 4412 4413 BIND(CRC_by4_loop); 4414 ldrw(tmp0, Address(post(buf, 4))); 4415 subs(len, len, 4); 4416 crc32w(crc, crc, tmp0); 4417 br(Assembler::GE, CRC_by4_loop); 4418 adds(len, len, 4); 4419 br(Assembler::LE, L_exit); 4420 BIND(CRC_by1_loop); 4421 ldrb(tmp0, Address(post(buf, 1))); 4422 subs(len, len, 1); 4423 crc32b(crc, crc, tmp0); 4424 br(Assembler::GT, CRC_by1_loop); 4425 b(L_exit); 4426 4427 BIND(CRC_by64_pre); 4428 sub(buf, buf, 8); 4429 ldp(tmp0, tmp1, Address(buf, 8)); 4430 crc32x(crc, crc, tmp0); 4431 ldr(tmp2, Address(buf, 24)); 4432 crc32x(crc, crc, tmp1); 4433 ldr(tmp3, Address(buf, 32)); 4434 crc32x(crc, crc, tmp2); 4435 ldr(tmp0, Address(buf, 40)); 4436 crc32x(crc, crc, tmp3); 4437 ldr(tmp1, Address(buf, 48)); 4438 crc32x(crc, crc, tmp0); 4439 ldr(tmp2, Address(buf, 56)); 4440 crc32x(crc, crc, tmp1); 4441 ldr(tmp3, Address(pre(buf, 64))); 4442 4443 b(CRC_by64_loop); 4444 4445 align(CodeEntryAlignment); 4446 BIND(CRC_by64_loop); 4447 subs(len, len, 64); 4448 crc32x(crc, crc, tmp2); 4449 ldr(tmp0, Address(buf, 8)); 4450 crc32x(crc, crc, tmp3); 4451 ldr(tmp1, Address(buf, 16)); 4452 crc32x(crc, crc, tmp0); 4453 ldr(tmp2, Address(buf, 24)); 4454 crc32x(crc, crc, tmp1); 4455 ldr(tmp3, Address(buf, 32)); 4456 crc32x(crc, crc, tmp2); 4457 ldr(tmp0, Address(buf, 40)); 4458 crc32x(crc, crc, tmp3); 4459 ldr(tmp1, Address(buf, 48)); 4460 crc32x(crc, crc, tmp0); 4461 ldr(tmp2, Address(buf, 56)); 4462 crc32x(crc, crc, tmp1); 4463 ldr(tmp3, Address(pre(buf, 64))); 4464 br(Assembler::GE, CRC_by64_loop); 4465 4466 // post-loop 4467 crc32x(crc, crc, tmp2); 4468 crc32x(crc, crc, tmp3); 4469 4470 sub(len, len, 64); 4471 add(buf, buf, 8); 4472 cmn(len, (u1)128); 4473 br(Assembler::NE, CRC_less64); 4474 BIND(L_exit); 4475 mvnw(crc, crc); 4476 } 4477 4478 /** 4479 * @param crc register containing existing CRC (32-bit) 4480 * @param buf register pointing to input byte buffer (byte*) 4481 * @param len register containing number of bytes 4482 * @param table register that will contain address of CRC table 4483 * @param tmp scratch register 4484 */ 4485 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4486 Register table0, Register table1, Register table2, Register table3, 4487 Register tmp, Register tmp2, Register tmp3) { 4488 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4489 4490 if (UseCryptoPmullForCRC32) { 4491 
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4492 return; 4493 } 4494 4495 if (UseCRC32) { 4496 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4497 return; 4498 } 4499 4500 mvnw(crc, crc); 4501 4502 { 4503 uint64_t offset; 4504 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4505 add(table0, table0, offset); 4506 } 4507 add(table1, table0, 1*256*sizeof(juint)); 4508 add(table2, table0, 2*256*sizeof(juint)); 4509 add(table3, table0, 3*256*sizeof(juint)); 4510 4511 { // Neon code start 4512 cmp(len, (u1)64); 4513 br(Assembler::LT, L_by16); 4514 eor(v16, T16B, v16, v16); 4515 4516 Label L_fold; 4517 4518 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4519 4520 ld1(v0, v1, T2D, post(buf, 32)); 4521 ld1r(v4, T2D, post(tmp, 8)); 4522 ld1r(v5, T2D, post(tmp, 8)); 4523 ld1r(v6, T2D, post(tmp, 8)); 4524 ld1r(v7, T2D, post(tmp, 8)); 4525 mov(v16, S, 0, crc); 4526 4527 eor(v0, T16B, v0, v16); 4528 sub(len, len, 64); 4529 4530 BIND(L_fold); 4531 pmull(v22, T8H, v0, v5, T8B); 4532 pmull(v20, T8H, v0, v7, T8B); 4533 pmull(v23, T8H, v0, v4, T8B); 4534 pmull(v21, T8H, v0, v6, T8B); 4535 4536 pmull2(v18, T8H, v0, v5, T16B); 4537 pmull2(v16, T8H, v0, v7, T16B); 4538 pmull2(v19, T8H, v0, v4, T16B); 4539 pmull2(v17, T8H, v0, v6, T16B); 4540 4541 uzp1(v24, T8H, v20, v22); 4542 uzp2(v25, T8H, v20, v22); 4543 eor(v20, T16B, v24, v25); 4544 4545 uzp1(v26, T8H, v16, v18); 4546 uzp2(v27, T8H, v16, v18); 4547 eor(v16, T16B, v26, v27); 4548 4549 ushll2(v22, T4S, v20, T8H, 8); 4550 ushll(v20, T4S, v20, T4H, 8); 4551 4552 ushll2(v18, T4S, v16, T8H, 8); 4553 ushll(v16, T4S, v16, T4H, 8); 4554 4555 eor(v22, T16B, v23, v22); 4556 eor(v18, T16B, v19, v18); 4557 eor(v20, T16B, v21, v20); 4558 eor(v16, T16B, v17, v16); 4559 4560 uzp1(v17, T2D, v16, v20); 4561 uzp2(v21, T2D, v16, v20); 4562 eor(v17, T16B, v17, v21); 4563 4564 ushll2(v20, T2D, v17, T4S, 16); 4565 ushll(v16, T2D, v17, T2S, 16); 4566 4567 eor(v20, T16B, v20, v22); 4568 eor(v16, T16B, v16, v18); 4569 4570 uzp1(v17, T2D, v20, v16); 4571 uzp2(v21, T2D, v20, v16); 4572 eor(v28, T16B, v17, v21); 4573 4574 pmull(v22, T8H, v1, v5, T8B); 4575 pmull(v20, T8H, v1, v7, T8B); 4576 pmull(v23, T8H, v1, v4, T8B); 4577 pmull(v21, T8H, v1, v6, T8B); 4578 4579 pmull2(v18, T8H, v1, v5, T16B); 4580 pmull2(v16, T8H, v1, v7, T16B); 4581 pmull2(v19, T8H, v1, v4, T16B); 4582 pmull2(v17, T8H, v1, v6, T16B); 4583 4584 ld1(v0, v1, T2D, post(buf, 32)); 4585 4586 uzp1(v24, T8H, v20, v22); 4587 uzp2(v25, T8H, v20, v22); 4588 eor(v20, T16B, v24, v25); 4589 4590 uzp1(v26, T8H, v16, v18); 4591 uzp2(v27, T8H, v16, v18); 4592 eor(v16, T16B, v26, v27); 4593 4594 ushll2(v22, T4S, v20, T8H, 8); 4595 ushll(v20, T4S, v20, T4H, 8); 4596 4597 ushll2(v18, T4S, v16, T8H, 8); 4598 ushll(v16, T4S, v16, T4H, 8); 4599 4600 eor(v22, T16B, v23, v22); 4601 eor(v18, T16B, v19, v18); 4602 eor(v20, T16B, v21, v20); 4603 eor(v16, T16B, v17, v16); 4604 4605 uzp1(v17, T2D, v16, v20); 4606 uzp2(v21, T2D, v16, v20); 4607 eor(v16, T16B, v17, v21); 4608 4609 ushll2(v20, T2D, v16, T4S, 16); 4610 ushll(v16, T2D, v16, T2S, 16); 4611 4612 eor(v20, T16B, v22, v20); 4613 eor(v16, T16B, v16, v18); 4614 4615 uzp1(v17, T2D, v20, v16); 4616 uzp2(v21, T2D, v20, v16); 4617 eor(v20, T16B, v17, v21); 4618 4619 shl(v16, T2D, v28, 1); 4620 shl(v17, T2D, v20, 1); 4621 4622 eor(v0, T16B, v0, v16); 4623 eor(v1, T16B, v1, v17); 4624 4625 subs(len, len, 32); 4626 br(Assembler::GE, L_fold); 4627 4628 mov(crc, 0); 4629 mov(tmp, v0, D, 0); 
4630 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4631 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4632 mov(tmp, v0, D, 1); 4633 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4634 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4635 mov(tmp, v1, D, 0); 4636 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4637 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4638 mov(tmp, v1, D, 1); 4639 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4640 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4641 4642 add(len, len, 32); 4643 } // Neon code end 4644 4645 BIND(L_by16); 4646 subs(len, len, 16); 4647 br(Assembler::GE, L_by16_loop); 4648 adds(len, len, 16-4); 4649 br(Assembler::GE, L_by4_loop); 4650 adds(len, len, 4); 4651 br(Assembler::GT, L_by1_loop); 4652 b(L_exit); 4653 4654 BIND(L_by4_loop); 4655 ldrw(tmp, Address(post(buf, 4))); 4656 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4657 subs(len, len, 4); 4658 br(Assembler::GE, L_by4_loop); 4659 adds(len, len, 4); 4660 br(Assembler::LE, L_exit); 4661 BIND(L_by1_loop); 4662 subs(len, len, 1); 4663 ldrb(tmp, Address(post(buf, 1))); 4664 update_byte_crc32(crc, tmp, table0); 4665 br(Assembler::GT, L_by1_loop); 4666 b(L_exit); 4667 4668 align(CodeEntryAlignment); 4669 BIND(L_by16_loop); 4670 subs(len, len, 16); 4671 ldp(tmp, tmp3, Address(post(buf, 16))); 4672 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4673 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4674 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4675 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4676 br(Assembler::GE, L_by16_loop); 4677 adds(len, len, 16-4); 4678 br(Assembler::GE, L_by4_loop); 4679 adds(len, len, 4); 4680 br(Assembler::GT, L_by1_loop); 4681 BIND(L_exit); 4682 mvnw(crc, crc); 4683 } 4684 4685 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4686 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4687 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4688 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4689 4690 subs(tmp0, len, 384); 4691 br(Assembler::GE, CRC_by128_pre); 4692 BIND(CRC_less128); 4693 subs(len, len, 32); 4694 br(Assembler::GE, CRC_by32_loop); 4695 BIND(CRC_less32); 4696 adds(len, len, 32 - 4); 4697 br(Assembler::GE, CRC_by4_loop); 4698 adds(len, len, 4); 4699 br(Assembler::GT, CRC_by1_loop); 4700 b(L_exit); 4701 4702 BIND(CRC_by32_loop); 4703 ldp(tmp0, tmp1, Address(buf)); 4704 crc32cx(crc, crc, tmp0); 4705 ldr(tmp2, Address(buf, 16)); 4706 crc32cx(crc, crc, tmp1); 4707 ldr(tmp3, Address(buf, 24)); 4708 crc32cx(crc, crc, tmp2); 4709 add(buf, buf, 32); 4710 subs(len, len, 32); 4711 crc32cx(crc, crc, tmp3); 4712 br(Assembler::GE, CRC_by32_loop); 4713 cmn(len, (u1)32); 4714 br(Assembler::NE, CRC_less32); 4715 b(L_exit); 4716 4717 BIND(CRC_by4_loop); 4718 ldrw(tmp0, Address(post(buf, 4))); 4719 subs(len, len, 4); 4720 crc32cw(crc, crc, tmp0); 4721 br(Assembler::GE, CRC_by4_loop); 4722 adds(len, len, 4); 4723 br(Assembler::LE, L_exit); 4724 BIND(CRC_by1_loop); 4725 ldrb(tmp0, Address(post(buf, 1))); 4726 subs(len, len, 1); 4727 crc32cb(crc, crc, tmp0); 4728 br(Assembler::GT, CRC_by1_loop); 4729 b(L_exit); 4730 
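  // Bulk path: fold 128 bytes per iteration using carry-less multiplies
  // (PMULL), then push the two remaining 64-bit words through crc32cx.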
4731 BIND(CRC_by128_pre); 4732 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4733 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4734 mov(crc, 0); 4735 crc32cx(crc, crc, tmp0); 4736 crc32cx(crc, crc, tmp1); 4737 4738 cbnz(len, CRC_less128); 4739 4740 BIND(L_exit); 4741 } 4742 4743 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4744 Register len, Register tmp0, Register tmp1, Register tmp2, 4745 Register tmp3) { 4746 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4747 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4748 4749 subs(len, len, 128); 4750 br(Assembler::GE, CRC_by64_pre); 4751 BIND(CRC_less64); 4752 adds(len, len, 128-32); 4753 br(Assembler::GE, CRC_by32_loop); 4754 BIND(CRC_less32); 4755 adds(len, len, 32-4); 4756 br(Assembler::GE, CRC_by4_loop); 4757 adds(len, len, 4); 4758 br(Assembler::GT, CRC_by1_loop); 4759 b(L_exit); 4760 4761 BIND(CRC_by32_loop); 4762 ldp(tmp0, tmp1, Address(post(buf, 16))); 4763 subs(len, len, 32); 4764 crc32cx(crc, crc, tmp0); 4765 ldr(tmp2, Address(post(buf, 8))); 4766 crc32cx(crc, crc, tmp1); 4767 ldr(tmp3, Address(post(buf, 8))); 4768 crc32cx(crc, crc, tmp2); 4769 crc32cx(crc, crc, tmp3); 4770 br(Assembler::GE, CRC_by32_loop); 4771 cmn(len, (u1)32); 4772 br(Assembler::NE, CRC_less32); 4773 b(L_exit); 4774 4775 BIND(CRC_by4_loop); 4776 ldrw(tmp0, Address(post(buf, 4))); 4777 subs(len, len, 4); 4778 crc32cw(crc, crc, tmp0); 4779 br(Assembler::GE, CRC_by4_loop); 4780 adds(len, len, 4); 4781 br(Assembler::LE, L_exit); 4782 BIND(CRC_by1_loop); 4783 ldrb(tmp0, Address(post(buf, 1))); 4784 subs(len, len, 1); 4785 crc32cb(crc, crc, tmp0); 4786 br(Assembler::GT, CRC_by1_loop); 4787 b(L_exit); 4788 4789 BIND(CRC_by64_pre); 4790 sub(buf, buf, 8); 4791 ldp(tmp0, tmp1, Address(buf, 8)); 4792 crc32cx(crc, crc, tmp0); 4793 ldr(tmp2, Address(buf, 24)); 4794 crc32cx(crc, crc, tmp1); 4795 ldr(tmp3, Address(buf, 32)); 4796 crc32cx(crc, crc, tmp2); 4797 ldr(tmp0, Address(buf, 40)); 4798 crc32cx(crc, crc, tmp3); 4799 ldr(tmp1, Address(buf, 48)); 4800 crc32cx(crc, crc, tmp0); 4801 ldr(tmp2, Address(buf, 56)); 4802 crc32cx(crc, crc, tmp1); 4803 ldr(tmp3, Address(pre(buf, 64))); 4804 4805 b(CRC_by64_loop); 4806 4807 align(CodeEntryAlignment); 4808 BIND(CRC_by64_loop); 4809 subs(len, len, 64); 4810 crc32cx(crc, crc, tmp2); 4811 ldr(tmp0, Address(buf, 8)); 4812 crc32cx(crc, crc, tmp3); 4813 ldr(tmp1, Address(buf, 16)); 4814 crc32cx(crc, crc, tmp0); 4815 ldr(tmp2, Address(buf, 24)); 4816 crc32cx(crc, crc, tmp1); 4817 ldr(tmp3, Address(buf, 32)); 4818 crc32cx(crc, crc, tmp2); 4819 ldr(tmp0, Address(buf, 40)); 4820 crc32cx(crc, crc, tmp3); 4821 ldr(tmp1, Address(buf, 48)); 4822 crc32cx(crc, crc, tmp0); 4823 ldr(tmp2, Address(buf, 56)); 4824 crc32cx(crc, crc, tmp1); 4825 ldr(tmp3, Address(pre(buf, 64))); 4826 br(Assembler::GE, CRC_by64_loop); 4827 4828 // post-loop 4829 crc32cx(crc, crc, tmp2); 4830 crc32cx(crc, crc, tmp3); 4831 4832 sub(len, len, 64); 4833 add(buf, buf, 8); 4834 cmn(len, (u1)128); 4835 br(Assembler::NE, CRC_less64); 4836 BIND(L_exit); 4837 } 4838 4839 /** 4840 * @param crc register containing existing CRC (32-bit) 4841 * @param buf register pointing to input byte buffer (byte*) 4842 * @param len register containing number of bytes 4843 * @param table register that will contain address of CRC table 4844 * @param tmp scratch register 4845 */ 4846 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4847 Register table0, Register table1, Register table2, Register table3, 4848 Register tmp, Register tmp2, Register tmp3) { 4849 if (UseCryptoPmullForCRC32) { 4850 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4851 } else { 4852 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4853 } 4854 } 4855 4856 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4857 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4858 Label CRC_by128_loop; 4859 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4860 4861 sub(len, len, 256); 4862 Register table = tmp0; 4863 { 4864 uint64_t offset; 4865 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4866 add(table, table, offset); 4867 } 4868 add(table, table, table_offset); 4869 4870 // Registers v0..v7 are used as data registers. 4871 // Registers v16..v31 are used as tmp registers. 4872 sub(buf, buf, 0x10); 4873 ldrq(v0, Address(buf, 0x10)); 4874 ldrq(v1, Address(buf, 0x20)); 4875 ldrq(v2, Address(buf, 0x30)); 4876 ldrq(v3, Address(buf, 0x40)); 4877 ldrq(v4, Address(buf, 0x50)); 4878 ldrq(v5, Address(buf, 0x60)); 4879 ldrq(v6, Address(buf, 0x70)); 4880 ldrq(v7, Address(pre(buf, 0x80))); 4881 4882 movi(v31, T4S, 0); 4883 mov(v31, S, 0, crc); 4884 eor(v0, T16B, v0, v31); 4885 4886 // Register v16 contains constants from the crc table. 4887 ldrq(v16, Address(table)); 4888 b(CRC_by128_loop); 4889 4890 align(OptoLoopAlignment); 4891 BIND(CRC_by128_loop); 4892 pmull (v17, T1Q, v0, v16, T1D); 4893 pmull2(v18, T1Q, v0, v16, T2D); 4894 ldrq(v0, Address(buf, 0x10)); 4895 eor3(v0, T16B, v17, v18, v0); 4896 4897 pmull (v19, T1Q, v1, v16, T1D); 4898 pmull2(v20, T1Q, v1, v16, T2D); 4899 ldrq(v1, Address(buf, 0x20)); 4900 eor3(v1, T16B, v19, v20, v1); 4901 4902 pmull (v21, T1Q, v2, v16, T1D); 4903 pmull2(v22, T1Q, v2, v16, T2D); 4904 ldrq(v2, Address(buf, 0x30)); 4905 eor3(v2, T16B, v21, v22, v2); 4906 4907 pmull (v23, T1Q, v3, v16, T1D); 4908 pmull2(v24, T1Q, v3, v16, T2D); 4909 ldrq(v3, Address(buf, 0x40)); 4910 eor3(v3, T16B, v23, v24, v3); 4911 4912 pmull (v25, T1Q, v4, v16, T1D); 4913 pmull2(v26, T1Q, v4, v16, T2D); 4914 ldrq(v4, Address(buf, 0x50)); 4915 eor3(v4, T16B, v25, v26, v4); 4916 4917 pmull (v27, T1Q, v5, v16, T1D); 4918 pmull2(v28, T1Q, v5, v16, T2D); 4919 ldrq(v5, Address(buf, 0x60)); 4920 eor3(v5, T16B, v27, v28, v5); 4921 4922 pmull (v29, T1Q, v6, v16, T1D); 4923 pmull2(v30, T1Q, v6, v16, T2D); 4924 ldrq(v6, Address(buf, 0x70)); 4925 eor3(v6, T16B, v29, v30, v6); 4926 4927 // Reuse registers v23, v24. 4928 // Using them won't block the first instruction of the next iteration. 4929 pmull (v23, T1Q, v7, v16, T1D); 4930 pmull2(v24, T1Q, v7, v16, T2D); 4931 ldrq(v7, Address(pre(buf, 0x80))); 4932 eor3(v7, T16B, v23, v24, v7); 4933 4934 subs(len, len, 0x80); 4935 br(Assembler::GE, CRC_by128_loop); 4936 4937 // fold into 512 bits 4938 // Use v31 for constants because v16 can be still in use. 
4939 ldrq(v31, Address(table, 0x10)); 4940 4941 pmull (v17, T1Q, v0, v31, T1D); 4942 pmull2(v18, T1Q, v0, v31, T2D); 4943 eor3(v0, T16B, v17, v18, v4); 4944 4945 pmull (v19, T1Q, v1, v31, T1D); 4946 pmull2(v20, T1Q, v1, v31, T2D); 4947 eor3(v1, T16B, v19, v20, v5); 4948 4949 pmull (v21, T1Q, v2, v31, T1D); 4950 pmull2(v22, T1Q, v2, v31, T2D); 4951 eor3(v2, T16B, v21, v22, v6); 4952 4953 pmull (v23, T1Q, v3, v31, T1D); 4954 pmull2(v24, T1Q, v3, v31, T2D); 4955 eor3(v3, T16B, v23, v24, v7); 4956 4957 // fold into 128 bits 4958 // Use v17 for constants because v31 can be still in use. 4959 ldrq(v17, Address(table, 0x20)); 4960 pmull (v25, T1Q, v0, v17, T1D); 4961 pmull2(v26, T1Q, v0, v17, T2D); 4962 eor3(v3, T16B, v3, v25, v26); 4963 4964 // Use v18 for constants because v17 can be still in use. 4965 ldrq(v18, Address(table, 0x30)); 4966 pmull (v27, T1Q, v1, v18, T1D); 4967 pmull2(v28, T1Q, v1, v18, T2D); 4968 eor3(v3, T16B, v3, v27, v28); 4969 4970 // Use v19 for constants because v18 can be still in use. 4971 ldrq(v19, Address(table, 0x40)); 4972 pmull (v29, T1Q, v2, v19, T1D); 4973 pmull2(v30, T1Q, v2, v19, T2D); 4974 eor3(v0, T16B, v3, v29, v30); 4975 4976 add(len, len, 0x80); 4977 add(buf, buf, 0x10); 4978 4979 mov(tmp0, v0, D, 0); 4980 mov(tmp1, v0, D, 1); 4981 } 4982 4983 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4984 Address adr; 4985 switch(dst.getMode()) { 4986 case Address::base_plus_offset: 4987 // This is the expected mode, although we allow all the other 4988 // forms below. 4989 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4990 break; 4991 default: 4992 lea(rscratch2, dst); 4993 adr = Address(rscratch2); 4994 break; 4995 } 4996 ldr(rscratch1, adr); 4997 add(rscratch1, rscratch1, src); 4998 str(rscratch1, adr); 4999 } 5000 5001 void MacroAssembler::cmpptr(Register src1, Address src2) { 5002 uint64_t offset; 5003 adrp(rscratch1, src2, offset); 5004 ldr(rscratch1, Address(rscratch1, offset)); 5005 cmp(src1, rscratch1); 5006 } 5007 5008 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 5009 cmp(obj1, obj2); 5010 } 5011 5012 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5013 load_method_holder(rresult, rmethod); 5014 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5015 } 5016 5017 void MacroAssembler::load_method_holder(Register holder, Register method) { 5018 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 5019 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5020 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5021 } 5022 5023 // Loads the obj's Klass* into dst. 5024 // Preserves all registers (incl src, rscratch1 and rscratch2). 5025 // Input: 5026 // src - the oop we want to load the klass from. 5027 // dst - output narrow klass. 
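// With compact object headers the narrow klass is stored in the upper bits
// of the 64-bit mark word, so conceptually (an illustrative sketch):
//
//   narrowKlass nk = (narrowKlass)(mark_word >> markWord::klass_shift);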
void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
  assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
  ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
  lsr(dst, dst, markWord::klass_shift);
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(dst, src);
    decode_klass_not_null(dst);
  } else if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
  if (RestoreMXCSROnJNICalls) {
    Label OK;
    get_fpcr(tmp1);
    mov(tmp2, tmp1);
    // Set FPCR to the state we need. We do want Round to Nearest. We
    // don't want non-IEEE rounding modes or floating-point traps.
    bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
    bfi(tmp1, zr, 8, 5);  // Clear exception-control bits (8-12)
    bfi(tmp1, zr, 0, 2);  // Clear AH:FIZ
    eor(tmp2, tmp1, tmp2);
    cbz(tmp2, OK);        // Only reset FPCR if it's wrong
    set_fpcr(tmp1);
    bind(OK);
  }
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
  // OopHandle::resolve is an indirection.
  access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
}

// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
  assert_different_registers(result, tmp1, tmp2);
  Label resolved;

  // A null weak handle resolves to null.
  cbz(result, resolved);

  // Only 64 bit platforms support GCs that require a tmp register
  // WeakHandle::resolve is an indirection like jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(method, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}

void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, obj1);
    load_narrow_klass_compact(tmp2, obj2);
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
    cmp(tmp1, tmp2);
  }
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release? Concurrent GCs assume
  // klass length is valid if klass field is not null.
  assert(!UseCompactObjectHeaders, "not with compact headers");
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  assert(!UseCompactObjectHeaders, "not with compact headers");
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match CompressedOops::encode.
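//
// That is, roughly (sketch; CompressedOops::encode() is authoritative):
//
//   narrow = (oop == nullptr) ? 0
//                             : (oop - CompressedOops::base()) >> CompressedOops::shift();
//
// The subs/csel pair below computes the null case branch-free: subtracting
// rheapbase borrows for a null (zero) oop, and csel then selects zr.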
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop_msg(s, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /*  Old algorithm: is this any worse?
    Label nonnull;
    cbnz(r, nonnull);
    sub(r, r, rheapbase);
    bind(nonnull);
    lsr(r, r, LogMinObjAlignmentInBytes);
    */
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(r, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
  if (CompressedOops::base() != nullptr) {
    sub(r, r, rheapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(r, r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(src, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");

  Register data = src;
  if (CompressedOops::base() != nullptr) {
    sub(dst, src, rheapbase);
    data = dst;
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    lsr(dst, data, LogMinObjAlignmentInBytes);
    data = dst;
  }
  if (data == src)
    mov(dst, src);
}

void MacroAssembler::decode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (CompressedOops::base() == nullptr) {
    if (CompressedOops::shift() != 0) {
      lsl(d, s, CompressedOops::shift());
    } else if (d != s) {
      mov(d, s);
    }
  } else {
    Label done;
    if (d != s)
      mov(d, s);
    cbz(s, done);
    add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
    bind(done);
  }
  verify_oop_msg(d, "broken oop in decode_heap_oop");
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
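  //
  // The not-null decode is simply (sketch, mirroring CompressedOops::decode_not_null):
  //
  //   oop = CompressedOops::base() + ((uint64_t)narrow << shift)
  //
  // with no null check: a narrow value of 0 would wrongly decode to 'base',
  // which is why callers must guarantee a non-null input.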
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != nullptr, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (CompressedOops::base() != nullptr) {
      add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    } else {
      add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (CompressedOops::base() == nullptr, "sanity");
    if (dst != src) {
      mov(dst, src);
    }
  }
}

MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
  assert(Metaspace::initialized(), "metaspace not initialized yet");
  assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
  return _klass_decode_mode;
}

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
  assert(UseCompressedClassPointers, "not using compressed class pointers");

  // KlassDecodeMode shouldn't be set already.
  assert(_klass_decode_mode == KlassDecodeNone, "set once");

  if (base == nullptr) {
    return KlassDecodeZero;
  }

  if (operand_valid_for_logical_immediate(
        /*is32*/false, (uint64_t)base)) {
    const uint64_t range_mask = right_n_bits(log2i_ceil(range));
    if (((uint64_t)base & range_mask) == 0) {
      return KlassDecodeXor;
    }
  }

  const uint64_t shifted_base =
    (uint64_t)base >> shift;
  if ((shifted_base & 0xffff0000ffffffff) == 0) {
    return KlassDecodeMovk;
  }

  // No valid encoding.
  return KlassDecodeNone;
}

// Check if one of the above decoding modes will work for given base, shift and range.
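//
// In plain terms (a sketch of the selection logic above, not new policy):
//  - KlassDecodeZero: base == nullptr, so decoding is just 'nk << shift'.
//  - KlassDecodeXor:  base is encodable as a logical immediate and has no
//    bits in the low log2(range) positions, so base and (nk << shift)
//    occupy disjoint bit ranges and a single eor can combine them.
//  - KlassDecodeMovk: (base >> shift) has bits only in [32, 48), so one
//    'movk ..., lsl #32' can plant the base above the 32-bit narrow value.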
bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
  return klass_decode_mode(base, shift, range) != KlassDecodeNone;
}

bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
  _klass_decode_mode = klass_decode_mode(base, shift, range);
  return _klass_decode_mode != KlassDecodeNone;
}

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsr(dst, src, CompressedKlassPointers::shift());
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
      lsr(dst, dst, CompressedKlassPointers::shift());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk:
    if (CompressedKlassPointers::shift() != 0) {
      ubfx(dst, src, CompressedKlassPointers::shift(), 32);
    } else {
      movw(dst, src);
    }
    break;

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::encode_klass_not_null(Register r) {
  encode_klass_not_null(r, r);
}

void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");

  switch (klass_decode_mode()) {
  case KlassDecodeZero:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, CompressedKlassPointers::shift());
    } else {
      if (dst != src) mov(dst, src);
    }
    break;

  case KlassDecodeXor:
    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, src, CompressedKlassPointers::shift());
      eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
    } else {
      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
    }
    break;

  case KlassDecodeMovk: {
    const uint64_t shifted_base =
      (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();

    if (dst != src) movw(dst, src);
    movk(dst, shifted_base >> 32, 32);

    if (CompressedKlassPointers::shift() != 0) {
      lsl(dst, dst, CompressedKlassPointers::shift());
    }

    break;
  }

  case KlassDecodeNone:
    ShouldNotReachHere();
    break;
  }
}

void MacroAssembler::decode_klass_not_null(Register r) {
  decode_klass_not_null(r, r);
}

void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert (UseCompressedOops, "should only be used for compressed oops");
    assert (Universe::heap() != nullptr, "java heap should be initialized");
    assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  InstructionMark im(this);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  code_section()->relocate(inst_mark(), rspec);
  movz(dst, 0xDEAD, 16);
  movk(dst, 0xBEEF);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
headers"); 5435 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5436 int index = oop_recorder()->find_index(k); 5437 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5438 5439 InstructionMark im(this); 5440 RelocationHolder rspec = metadata_Relocation::spec(index); 5441 code_section()->relocate(inst_mark(), rspec); 5442 narrowKlass nk = CompressedKlassPointers::encode(k); 5443 movz(dst, (nk >> 16), 16); 5444 movk(dst, nk & 0xffff); 5445 } 5446 5447 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5448 Register dst, Address src, 5449 Register tmp1, Register tmp2) { 5450 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5451 decorators = AccessInternal::decorator_fixup(decorators, type); 5452 bool as_raw = (decorators & AS_RAW) != 0; 5453 if (as_raw) { 5454 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5455 } else { 5456 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5457 } 5458 } 5459 5460 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5461 Address dst, Register val, 5462 Register tmp1, Register tmp2, Register tmp3) { 5463 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5464 decorators = AccessInternal::decorator_fixup(decorators, type); 5465 bool as_raw = (decorators & AS_RAW) != 0; 5466 if (as_raw) { 5467 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5468 } else { 5469 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5470 } 5471 } 5472 5473 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5474 Register tmp2, DecoratorSet decorators) { 5475 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5476 } 5477 5478 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5479 Register tmp2, DecoratorSet decorators) { 5480 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5481 } 5482 5483 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5484 Register tmp2, Register tmp3, DecoratorSet decorators) { 5485 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5486 } 5487 5488 // Used for storing nulls. 5489 void MacroAssembler::store_heap_oop_null(Address dst) { 5490 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5491 } 5492 5493 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5494 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5495 int index = oop_recorder()->allocate_metadata_index(obj); 5496 RelocationHolder rspec = metadata_Relocation::spec(index); 5497 return Address((address)obj, rspec); 5498 } 5499 5500 // Move an oop into a register. 
void MacroAssembler::movoop(Register dst, jobject obj) {
  int oop_index;
  if (obj == nullptr) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromUnknown tiv;
      assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
    }
#endif
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
    mov(dst, Address((address)obj, rspec));
  } else {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr(dst, Address(dummy, rspec));
  }
}

// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj == nullptr) {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = metadata_Relocation::spec(oop_index);
  mov(dst, Address((address)obj, rspec));
}

Address MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return Address((address)obj, oop_Relocation::spec(oop_index));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

void MacroAssembler::inc_held_monitor_count(Register tmp) {
  Address dst(rthread, JavaThread::held_monitor_count_offset());
#ifdef ASSERT
  ldr(tmp, dst);
  increment(tmp);
  str(tmp, dst);
  Label ok;
  tbz(tmp, 63, ok);
  STOP("assert(held monitor count underflow)");
  should_not_reach_here();
  bind(ok);
#else
  increment(dst);
#endif
}

void MacroAssembler::dec_held_monitor_count(Register tmp) {
  Address dst(rthread, JavaThread::held_monitor_count_offset());
#ifdef ASSERT
  ldr(tmp, dst);
  decrement(tmp);
  str(tmp, dst);
  Label ok;
  tbz(tmp, 63, ok);
  STOP("assert(held monitor count underflow)");
  should_not_reach_here();
  bind(ok);
#else
  decrement(dst);
#endif
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;

    stp(rscratch2, rscratch1, Address(pre(sp, -16)));

    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  mov(rscratch1, (int)os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -(int)os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
    // this could be any sized move, but it can serve as a debugging crumb,
    // so the bigger the better.
    lea(tmp, Address(tmp, -(int)os::vm_page_size()));
    str(size, Address(tmp));
  }
}

// Move the address of the polling page into dest.
void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
  ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
}

// Read the polling page. The address of the polling page must
// already be in r.
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  address mark;
  {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), rtype);
    ldrw(zr, Address(r, 0));
    mark = inst_mark();
  }
  verify_cross_modify_fence_not_required();
  return mark;
}

void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
  relocInfo::relocType rtype = dest.rspec().reloc()->type();
  uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
  uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
  uint64_t dest_page = (uint64_t)dest.target() >> 12;
  int64_t offset_low = dest_page - low_page;
  int64_t offset_high = dest_page - high_page;

  assert(is_valid_AArch64_address(dest.target()), "bad address");
  assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");

  InstructionMark im(this);
  code_section()->relocate(inst_mark(), dest.rspec());
  // 8143067: Ensure that the adrp can reach the dest from anywhere within
  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
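  // For reference, the card-marking barrier computes (sketch; the shift is
  // CardTable::card_shift, 9 with the usual 512-byte cards):
  //
  //   card_addr = byte_map_base + (heap_addr >> card_shift)
  //
  // byte_map_base is pre-biased by the heap's base address, which is why it
  // need not itself point into the card table (or even be positive).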
  mov(reg, (uint64_t)byte_map_base);
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
  authenticate_return_address();
}


// This method counts leading positive bytes (highest bit not set) in provided byte array
address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
  // Simple and most common case of aligned small array which is not at the
  // end of memory page is placed here. All other cases are in stub.
  Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
  const uint64_t UPPER_BIT_MASK=0x8080808080808080;
  assert_different_registers(ary1, len, result);

  mov(result, len);
  cmpw(len, 0);
  br(LE, DONE);
  cmpw(len, 4 * wordSize);
  br(GE, STUB_LONG); // size >= 32 then go to stub

  int shift = 64 - exact_log2(os::vm_page_size());
  lsl(rscratch1, ary1, shift);
  mov(rscratch2, (size_t)(4 * wordSize) << shift);
  adds(rscratch2, rscratch1, rscratch2);  // At end of page?
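  // The two instructions above place the page offset of ary1 in the top
  // bits of rscratch1; the add then carries out (C set) exactly when fewer
  // than 4 * wordSize bytes remain on the page. E.g. with 4K pages, an
  // array starting 8 bytes before a page boundary gives 0xFF8 << 52, and
  // adding 0x20 << 52 overflows, sending us to the stub.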
  br(CS, STUB); // at the end of page then go to stub
  subs(len, len, wordSize);
  br(LT, END);

  BIND(LOOP);
  ldr(rscratch1, Address(post(ary1, wordSize)));
  tst(rscratch1, UPPER_BIT_MASK);
  br(NE, SET_RESULT);
  subs(len, len, wordSize);
  br(GE, LOOP);
  cmpw(len, -wordSize);
  br(EQ, DONE);

  BIND(END);
  ldr(rscratch1, Address(ary1));
  sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
  lslv(rscratch1, rscratch1, rscratch2);
  tst(rscratch1, UPPER_BIT_MASK);
  br(NE, SET_RESULT);
  b(DONE);

  BIND(STUB);
  RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
  assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
  address tpc1 = trampoline_call(count_pos);
  if (tpc1 == nullptr) {
    DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
    postcond(pc() == badAddress);
    return nullptr;
  }
  b(DONE);

  BIND(STUB_LONG);
  RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
  assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
  address tpc2 = trampoline_call(count_pos_long);
  if (tpc2 == nullptr) {
    DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
    postcond(pc() == badAddress);
    return nullptr;
  }
  b(DONE);

  BIND(SET_RESULT);

  add(len, len, wordSize);
  sub(result, result, len);

  BIND(DONE);
  postcond(pc() != badAddress);
  return pc();
}

// Clobbers: rscratch1, rscratch2, rflags
// May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
                                      Register tmp4, Register tmp5, Register result,
                                      Register cnt1, int elem_size) {
  Label DONE, SAME;
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  Register cnt2 = tmp2;  // cnt2 only used in array length compare
  int elem_per_word = wordSize/elem_size;
  int log_elem_size = exact_log2(elem_size);
  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset
    = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
  int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);

  assert(elem_size == 1 || elem_size == 2, "must be char or byte");
  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);

#ifndef PRODUCT
  {
    const char kind = (elem_size == 2) ? 'U' : 'L';
    char comment[64];
    snprintf(comment, sizeof comment, "array_equals%c{", kind);
    BLOCK_COMMENT(comment);
  }
#endif

  // if (a1 == a2)
  //     return true;
  cmpoop(a1, a2); // May have read barriers for a1 and a2.
  br(EQ, SAME);

  if (UseSimpleArrayEquals) {
    Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
    // if (a1 == nullptr || a2 == nullptr)
    //     return false;
    // a1 & a2 == 0 means (some-pointer is null) or
    // (very-rare-or-even-probably-impossible-pointer-values)
    // so, we can save one branch in most cases
    tst(a1, a2);
    mov(result, false);
    br(EQ, A_MIGHT_BE_NULL);
    // if (a1.length != a2.length)
    //      return false;
    bind(A_IS_NOT_NULL);
    ldrw(cnt1, Address(a1, length_offset));
    ldrw(cnt2, Address(a2, length_offset));
    eorw(tmp5, cnt1, cnt2);
    cbnzw(tmp5, DONE);
    lea(a1, Address(a1, base_offset));
    lea(a2, Address(a2, base_offset));
    // Check for short strings, i.e. smaller than wordSize.
    subs(cnt1, cnt1, elem_per_word);
    br(Assembler::LT, SHORT);
    // Main 8 byte comparison loop.
    bind(NEXT_WORD); {
      ldr(tmp1, Address(post(a1, wordSize)));
      ldr(tmp2, Address(post(a2, wordSize)));
      subs(cnt1, cnt1, elem_per_word);
      eor(tmp5, tmp1, tmp2);
      cbnz(tmp5, DONE);
    } br(GT, NEXT_WORD);
    // Last longword.  In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.
    // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
    // length == 4.
    if (log_elem_size > 0)
      lsl(cnt1, cnt1, log_elem_size);
    ldr(tmp3, Address(a1, cnt1));
    ldr(tmp4, Address(a2, cnt1));
    eor(tmp5, tmp3, tmp4);
    cbnz(tmp5, DONE);
    b(SAME);
    bind(A_MIGHT_BE_NULL);
    // in case both a1 and a2 are not-null, proceed with loads
    cbz(a1, DONE);
    cbz(a2, DONE);
    b(A_IS_NOT_NULL);
    bind(SHORT);

    tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
    {
      ldrw(tmp1, Address(post(a1, 4)));
      ldrw(tmp2, Address(post(a2, 4)));
      eorw(tmp5, tmp1, tmp2);
      cbnzw(tmp5, DONE);
    }
    bind(TAIL03);
    tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
    {
      ldrh(tmp3, Address(post(a1, 2)));
      ldrh(tmp4, Address(post(a2, 2)));
      eorw(tmp5, tmp3, tmp4);
      cbnzw(tmp5, DONE);
    }
    bind(TAIL01);
    if (elem_size == 1) { // Only needed when comparing byte arrays.
      tbz(cnt1, 0, SAME); // 0-1 bytes left.
      {
        ldrb(tmp1, a1);
        ldrb(tmp2, a2);
        eorw(tmp5, tmp1, tmp2);
        cbnzw(tmp5, DONE);
      }
    }
  } else {
    Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
        CSET_EQ, LAST_CHECK;
    mov(result, false);
    cbz(a1, DONE);
    ldrw(cnt1, Address(a1, length_offset));
    cbz(a2, DONE);
    ldrw(cnt2, Address(a2, length_offset));
    // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's
    // faster to perform another branch before comparing a1 and a2
    cmp(cnt1, (u1)elem_per_word);
    br(LE, SHORT); // short or same
    ldr(tmp3, Address(pre(a1, base_offset)));
    subs(zr, cnt1, stubBytesThreshold);
    br(GE, STUB);
    ldr(tmp4, Address(pre(a2, base_offset)));
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
    cmp(cnt2, cnt1);
    br(NE, DONE);

    // Main 16 byte comparison loop with 2 exits
    bind(NEXT_DWORD); {
      ldr(tmp1, Address(pre(a1, wordSize)));
      ldr(tmp2, Address(pre(a2, wordSize)));
      subs(cnt1, cnt1, 2 * elem_per_word);
      br(LE, TAIL);
      eor(tmp4, tmp3, tmp4);
      cbnz(tmp4, DONE);
      ldr(tmp3, Address(pre(a1, wordSize)));
      ldr(tmp4, Address(pre(a2, wordSize)));
      cmp(cnt1, (u1)elem_per_word);
      br(LE, TAIL2);
      cmp(tmp1, tmp2);
    } br(EQ, NEXT_DWORD);
    b(DONE);

    bind(TAIL);
    eor(tmp4, tmp3, tmp4);
    eor(tmp2, tmp1, tmp2);
    lslv(tmp2, tmp2, tmp5);
    orr(tmp5, tmp4, tmp2);
    cmp(tmp5, zr);
    b(CSET_EQ);

    bind(TAIL2);
    eor(tmp2, tmp1, tmp2);
    cbnz(tmp2, DONE);
    b(LAST_CHECK);

    bind(STUB);
    ldr(tmp4, Address(pre(a2, base_offset)));
    cmp(cnt2, cnt1);
    br(NE, DONE);
    if (elem_size == 2) { // convert to byte counter
      lsl(cnt1, cnt1, 1);
    }
    eor(tmp5, tmp3, tmp4);
    cbnz(tmp5, DONE);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
    assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(DONE);

    // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
    // so, if a2 == null => return false(0), else return true, so we can return a2
    mov(result, a2);
    b(DONE);
    bind(SHORT);
    cmp(cnt2, cnt1);
    br(NE, DONE);
    cbz(cnt1, SAME);
    sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
    ldr(tmp3, Address(a1, base_offset));
    ldr(tmp4, Address(a2, base_offset));
    bind(LAST_CHECK);
    eor(tmp4, tmp3, tmp4);
    lslv(tmp5, tmp4, tmp5);
    cmp(tmp5, zr);
    bind(CSET_EQ);
    cset(result, EQ);
    b(DONE);
  }

  bind(SAME);
  mov(result, true);
  // That's it.
  bind(DONE);

  BLOCK_COMMENT("} array_equals");
  postcond(pc() != badAddress);
  return pc();
}

// Compare Strings

// For Strings we're passed the address of the first characters in a1
// and a2 and the length in cnt1.
// There are two implementations.  For arrays >= 8 bytes, all
// comparisons (including the final one, which may overlap) are
// performed 8 bytes at a time.  For strings < 8 bytes, we compare a
// word, then a halfword, and then a byte.
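// For example, for an 11-byte string the main loop compares bytes [0, 8)
// and the final (possibly overlapping) load compares bytes [3, 11): bytes
// [3, 8) are checked twice, which is harmless and cheaper than a branchy
// byte-at-a-time tail.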

void MacroAssembler::string_equals(Register a1, Register a2,
                                   Register result, Register cnt1)
{
  Label SAME, DONE, SHORT, NEXT_WORD;
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  Register cnt2 = tmp2;  // cnt2 only used in array length compare

  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);

#ifndef PRODUCT
  {
    char comment[64];
    snprintf(comment, sizeof comment, "{string_equalsL");
    BLOCK_COMMENT(comment);
  }
#endif

  mov(result, false);

  // Check for short strings, i.e. smaller than wordSize.
  subs(cnt1, cnt1, wordSize);
  br(Assembler::LT, SHORT);
  // Main 8 byte comparison loop.
  bind(NEXT_WORD); {
    ldr(tmp1, Address(post(a1, wordSize)));
    ldr(tmp2, Address(post(a2, wordSize)));
    subs(cnt1, cnt1, wordSize);
    eor(tmp1, tmp1, tmp2);
    cbnz(tmp1, DONE);
  } br(GT, NEXT_WORD);
  // Last longword.  In the case where length == 4 we compare the
  // same longword twice, but that's still faster than another
  // conditional branch.
  // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
  // length == 4.
  ldr(tmp1, Address(a1, cnt1));
  ldr(tmp2, Address(a2, cnt1));
  eor(tmp2, tmp1, tmp2);
  cbnz(tmp2, DONE);
  b(SAME);

  bind(SHORT);
  Label TAIL03, TAIL01;

  tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
  {
    ldrw(tmp1, Address(post(a1, 4)));
    ldrw(tmp2, Address(post(a2, 4)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL03);
  tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
  {
    ldrh(tmp1, Address(post(a1, 2)));
    ldrh(tmp2, Address(post(a2, 2)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL01);
  tbz(cnt1, 0, SAME); // 0-1 bytes left.
  {
    ldrb(tmp1, a1);
    ldrb(tmp2, a2);
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  // Arrays are equal.
  bind(SAME);
  mov(result, true);

  // That's it.
  bind(DONE);
  BLOCK_COMMENT("} string_equals");
}


// The size of the blocks erased by the zero_blocks stub.  We must
// handle anything smaller than this ourselves in zero_words().
const int MacroAssembler::zero_words_block_size = 8;

// zero_words() is used by C2 ClearArray patterns and by
// C1_MacroAssembler.  It is as small as possible, handling small word
// counts locally and delegating anything larger to the zero_blocks
// stub.  It is expanded many times in compiled code, so it is
// important to keep it short.

// ptr:   Address of a buffer to be zeroed.
// cnt:   Count in HeapWords.
//
// ptr, cnt, rscratch1, and rscratch2 are clobbered.
address MacroAssembler::zero_words(Register ptr, Register cnt)
{
  assert(is_power_of_2(zero_words_block_size), "adjust this");

  BLOCK_COMMENT("zero_words {");
  assert(ptr == r10 && cnt == r11, "mismatch in register usage");
  RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
  assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");

  subs(rscratch1, cnt, zero_words_block_size);
  Label around;
  br(LO, around);
  {
    // Make sure this is a C2 compilation. C1 allocates space only for
    // trampoline stubs generated by Call LIR ops, and in any case it
    // makes sense for a C1 compilation task to proceed as quickly as
    // possible.
    CompileTask* task;
    if (StubRoutines::aarch64::complete()
        && Thread::current()->is_Compiler_thread()
        && (task = ciEnv::current()->task())
        && is_c2_compile(task->comp_level())) {
      address tpc = trampoline_call(zero_blocks);
      if (tpc == nullptr) {
        DEBUG_ONLY(reset_labels(around));
        return nullptr;
      }
    } else {
      far_call(zero_blocks);
    }
  }
  bind(around);

  // We have a few words left to do. zero_blocks has adjusted r10 and r11
  // for us.
  for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
    Label l;
    tbz(cnt, exact_log2(i), l);
    for (int j = 0; j < i; j += 2) {
      stp(zr, zr, post(ptr, 2 * BytesPerWord));
    }
    bind(l);
  }
  {
    Label l;
    tbz(cnt, 0, l);
    str(zr, Address(ptr));
    bind(l);
  }

  BLOCK_COMMENT("} zero_words");
  return pc();
}

// base:   Address of a buffer to be zeroed, 8 bytes aligned.
// cnt:    Immediate count in HeapWords.
//
// r10, r11, rscratch1, and rscratch2 are clobbered.
address MacroAssembler::zero_words(Register base, uint64_t cnt)
{
  assert(wordSize <= BlockZeroingLowLimit,
         "increase BlockZeroingLowLimit");
  address result = nullptr;
  if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
#ifndef PRODUCT
    {
      char buf[64];
      snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
      BLOCK_COMMENT(buf);
    }
#endif
    if (cnt >= 16) {
      uint64_t loops = cnt/16;
      if (loops > 1) {
        mov(rscratch2, loops - 1);
      }
      {
        Label loop;
        bind(loop);
        for (int i = 0; i < 16; i += 2) {
          stp(zr, zr, Address(base, i * BytesPerWord));
        }
        add(base, base, 16 * BytesPerWord);
        if (loops > 1) {
          subs(rscratch2, rscratch2, 1);
          br(GE, loop);
        }
      }
    }
    cnt %= 16;
    int i = cnt & 1;  // store any odd word to start
    if (i) str(zr, Address(base));
    for (; i < (int)cnt; i += 2) {
      stp(zr, zr, Address(base, i * wordSize));
    }
    BLOCK_COMMENT("} zero_words");
    result = pc();
  } else {
    mov(r10, base); mov(r11, cnt);
    result = zero_words(r10, r11);
  }
  return result;
}

// Zero blocks of memory by using DC ZVA.
//
// Aligns the base address first sufficiently for DC ZVA, then uses
// DC ZVA repeatedly for every full block.  cnt is the size to be
// zeroed in HeapWords.  Returns the count of words left to be zeroed
// in cnt.
//
// NOTE: This is intended to be used in the zero_blocks() stub.  If
// you want to use it elsewhere, note that cnt must be >= 2*zva_length.
void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
  Register tmp = rscratch1;
  Register tmp2 = rscratch2;
  int zva_length = VM_Version::zva_length();
  Label initial_table_end, loop_zva;
  Label fini;

  // Base must be 16 byte aligned. If not just return and let caller handle it
  tst(base, 0x0f);
  br(Assembler::NE, fini);
  // Align base with ZVA length.
  neg(tmp, base);
  andr(tmp, tmp, zva_length - 1);

  // tmp: the number of bytes to be filled to align the base with ZVA length.
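  // The adr/sub/br below is a computed branch into the stp ladder that
  // follows: each stp zeroes 16 bytes and occupies 4 bytes of code, so for
  // 'tmp' bytes of alignment we enter at
  //   initial_table_end - (tmp / 16) * 4 == initial_table_end - (tmp >> 2)
  // executing exactly the stores needed to reach the next ZVA boundary.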
  add(base, base, tmp);
  sub(cnt, cnt, tmp, Assembler::ASR, 3);
  adr(tmp2, initial_table_end);
  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
  br(tmp2);

  for (int i = -zva_length + 16; i < 0; i += 16)
    stp(zr, zr, Address(base, i));
  bind(initial_table_end);

  sub(cnt, cnt, zva_length >> 3);
  bind(loop_zva);
  dc(Assembler::ZVA, base);
  subs(cnt, cnt, zva_length >> 3);
  add(base, base, zva_length);
  br(Assembler::GE, loop_zva);
  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
  bind(fini);
}

// base:   Address of a buffer to be filled, 8 bytes aligned.
// cnt:    Count in 8-byte unit.
// value:  Value to be filled with.
// base will point to the end of the buffer after filling.
void MacroAssembler::fill_words(Register base, Register cnt, Register value)
{
//  Algorithm:
//
//    if (cnt == 0) {
//      return;
//    }
//    if ((p & 8) != 0) {
//      *p++ = v;
//    }
//
//    scratch1 = cnt & 14;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1 / 2) {
//      do {
//        cnt -= 16;
//          p[-16] = v;
//          p[-15] = v;
//        case 7:
//          p[-14] = v;
//          p[-13] = v;
//        case 6:
//          p[-12] = v;
//          p[-11] = v;
//          // ...
//        case 1:
//          p[-2] = v;
//          p[-1] = v;
//        case 0:
//          p += 16;
//      } while (cnt);
//    }
//    if ((cnt & 1) == 1) {
//      *p++ = v;
//    }

  assert_different_registers(base, cnt, value, rscratch1, rscratch2);

  Label fini, skip, entry, loop;
  const int unroll = 8; // Number of stp instructions we'll unroll

  cbz(cnt, fini);
  tbz(base, 3, skip);
  str(value, Address(post(base, 8)));
  sub(cnt, cnt, 1);
  bind(skip);

  andr(rscratch1, cnt, (unroll-1) * 2);
  sub(cnt, cnt, rscratch1);
  add(base, base, rscratch1, Assembler::LSL, 3);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
  br(rscratch2);

  bind(loop);
  add(base, base, unroll * 16);
  for (int i = -unroll; i < 0; i++)
    stp(value, value, Address(base, i * 16));
  bind(entry);
  subs(cnt, cnt, unroll * 2);
  br(Assembler::GE, loop);

  tbz(cnt, 0, fini);
  str(value, Address(post(base, 8)));
  bind(fini);
}

// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
//     return the number of characters copied.
// - java/lang/StringUTF16.compress
//     return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-
// condition: 'res' == 'len', while an unsuccessful copy will exit with the
// post-condition: 0 <= 'res' < 'len'.
//
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
//       degrade performance (on Ampere Altra - Neoverse N1), to an extent
//       beyond the acceptable, even though the footprint would be smaller.
//       Using 'umaxv' in the ASCII-case comes with a small penalty but does
//       avoid additional bloat.
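//
// A scalar model of what the vector code below computes (sketch only;
// 'limit' is 0x7F when 'ascii' is true, 0xFF otherwise):
//
//   int i = 0;
//   for (; i < len; i++) {
//     jchar c = src[i];
//     if (c > limit) break;  // not encodable: stop and report position
//     dst[i] = (jbyte)c;
//   }
//   return i;                // res == len iff the whole input was copied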
//
// Clobbers: src, dst, res, rscratch1, rscratch2, rflags
void MacroAssembler::encode_iso_array(Register src, Register dst,
                                      Register len, Register res, bool ascii,
                                      FloatRegister vtmp0, FloatRegister vtmp1,
                                      FloatRegister vtmp2, FloatRegister vtmp3,
                                      FloatRegister vtmp4, FloatRegister vtmp5)
{
  Register cnt = res;
  Register max = rscratch1;
  Register chk = rscratch2;

  prfm(Address(src), PLDL1STRM);
  movw(cnt, len);

#define ASCII(insn) do { if (ascii) { insn; } } while (0)

  Label LOOP_32, DONE_32, FAIL_32;

  BIND(LOOP_32);
  {
    cmpw(cnt, 32);
    br(LT, DONE_32);
    ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
    // Extract lower bytes.
    FloatRegister vlo0 = vtmp4;
    FloatRegister vlo1 = vtmp5;
    uzp1(vlo0, T16B, vtmp0, vtmp1);
    uzp1(vlo1, T16B, vtmp2, vtmp3);
    // Merge bits...
    orr(vtmp0, T16B, vtmp0, vtmp1);
    orr(vtmp2, T16B, vtmp2, vtmp3);
    // Extract merged upper bytes.
    FloatRegister vhix = vtmp0;
    uzp2(vhix, T16B, vtmp0, vtmp2);
    // ISO-check on hi-parts (all zero).
    // ASCII-check on lo-parts (no sign).
    FloatRegister vlox = vtmp1; // Merge lower bytes.
    ASCII(orr(vlox, T16B, vlo0, vlo1));
    umov(chk, vhix, D, 1);      ASCII(cm(LT, vlox, T16B, vlox));
    fmovd(max, vhix);           ASCII(umaxv(vlox, T16B, vlox));
    orr(chk, chk, max);         ASCII(umov(max, vlox, B, 0));
                                ASCII(orr(chk, chk, max));
    cbnz(chk, FAIL_32);
    subw(cnt, cnt, 32);
    st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
    b(LOOP_32);
  }
  BIND(FAIL_32);
  sub(src, src, 64);
  BIND(DONE_32);

  Label LOOP_8, SKIP_8;

  BIND(LOOP_8);
  {
    cmpw(cnt, 8);
    br(LT, SKIP_8);
    FloatRegister vhi = vtmp0;
    FloatRegister vlo = vtmp1;
    ld1(vtmp3, T8H, src);
    uzp1(vlo, T16B, vtmp3, vtmp3);
    uzp2(vhi, T16B, vtmp3, vtmp3);
    // ISO-check on hi-parts (all zero).
    // ASCII-check on lo-parts (no sign).
    ASCII(cm(LT, vtmp2, T16B, vlo));
    fmovd(chk, vhi);            ASCII(umaxv(vtmp2, T16B, vtmp2));
                                ASCII(umov(max, vtmp2, B, 0));
                                ASCII(orr(chk, chk, max));
    cbnz(chk, SKIP_8);

    strd(vlo, Address(post(dst, 8)));
    subw(cnt, cnt, 8);
    add(src, src, 16);
    b(LOOP_8);
  }
  BIND(SKIP_8);

#undef ASCII

  Label LOOP, DONE;

  cbz(cnt, DONE);
  BIND(LOOP);
  {
    Register chr = rscratch1;
    ldrh(chr, Address(post(src, 2)));
    tst(chr, ascii ? 0xff80 : 0xff00);
    br(NE, DONE);
    strb(chr, Address(post(dst, 1)));
    subs(cnt, cnt, 1);
    br(GT, LOOP);
  }
  BIND(DONE);
  // Return index where we stopped.
  subw(res, len, cnt);
}

// Inflate byte[] array to char[].
// Clobbers: src, dst, len, rflags, rscratch1, v0-v6
address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
                                           FloatRegister vtmp1, FloatRegister vtmp2,
                                           FloatRegister vtmp3, Register tmp4) {
  Label big, done, after_init, to_stub;

  assert_different_registers(src, dst, len, tmp4, rscratch1);

  fmovd(vtmp1, 0.0);
  lsrw(tmp4, len, 3);
  bind(after_init);
  cbnzw(tmp4, big);
  // Short string: less than 8 bytes.
  {
    Label loop, tiny;

    cmpw(len, 4);
    br(LT, tiny);
    // Use SIMD to do 4 bytes.
    ldrs(vtmp2, post(src, 4));
    zip1(vtmp3, T8B, vtmp2, vtmp1);
    subw(len, len, 4);
    strd(vtmp3, post(dst, 8));

    cbzw(len, done);

    // Do the remaining bytes by steam.
    bind(loop);
    ldrb(tmp4, post(src, 1));
    strh(tmp4, post(dst, 2));
    subw(len, len, 1);

    bind(tiny);
    cbnz(len, loop);

    b(done);
  }

  if (SoftwarePrefetchHintDistance >= 0) {
    bind(to_stub);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
    assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(big, done));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(after_init);
  }

  // Unpack the bytes 8 at a time.
  bind(big);
  {
    Label loop, around, loop_last, loop_start;

    if (SoftwarePrefetchHintDistance >= 0) {
      const int large_loop_threshold = (64 + 16)/8;
      ldrd(vtmp2, post(src, 8));
      andw(len, len, 7);
      cmp(tmp4, (u1)large_loop_threshold);
      br(GE, to_stub);
      b(loop_start);

      bind(loop);
      ldrd(vtmp2, post(src, 8));
      bind(loop_start);
      subs(tmp4, tmp4, 1);
      br(EQ, loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      ldrd(vtmp3, post(src, 8));
      st1(vtmp2, T8H, post(dst, 16));
      subs(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp3, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      br(NE, loop);
      b(around);
      bind(loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      st1(vtmp2, T8H, post(dst, 16));
      bind(around);
      cbz(len, done);
    } else {
      andw(len, len, 7);
      bind(loop);
      ldrd(vtmp2, post(src, 8));
      sub(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp2, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      cbnz(tmp4, loop);
    }
  }

  // Do the tail of up to 8 bytes.
  add(src, src, len);
  ldrd(vtmp3, Address(src, -8));
  add(dst, dst, len, ext::uxtw, 1);
  zip1(vtmp3, T16B, vtmp3, vtmp1);
  strq(vtmp3, Address(dst, -16));

  bind(done);
  postcond(pc() != badAddress);
  return pc();
}

// Compress char[] array to byte[].
// Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Return the array length if every element in array can be encoded,
// otherwise, the index of first non-latin1 (> 0xff) character.
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         Register res,
                                         FloatRegister tmp0, FloatRegister tmp1,
                                         FloatRegister tmp2, FloatRegister tmp3,
                                         FloatRegister tmp4, FloatRegister tmp5) {
  encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
}

// java.lang.Math.round(double a)
// Returns the closest long to the argument, with ties rounding to
// positive infinity.  This requires some fiddling for corner
// cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
void MacroAssembler::java_round_double(Register dst, FloatRegister src,
                                       FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_double: { ");
  fmovd(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
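  // Example of the double rounding being avoided: for a == the largest
  // double below 0.5 (0x1.fffffffffffffp-2), a + 0.5 rounds up to exactly
  // 1.0, so the naive (jlong)(a + 0.5) yields 1, whereas the specified
  // result floor(a + 0.5), computed exactly, is 0. fcvtas gets this right
  // directly: ties-away rounding of a positive value below 0.5 is 0.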
  fcvtasd(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p52
  eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
  mov(rscratch2, julong_cast(0x1.0p52));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && abs(src) < 0x1.0p52
    // src may have a fractional part, so add 0.5
    fmovd(ftmp, 0.5);
    faddd(ftmp, src, ftmp);
    // Convert double to jlong, use RoundTowardsNegative
    fcvtmsd(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_double");
}

void MacroAssembler::java_round_float(Register dst, FloatRegister src,
                                      FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_float: { ");
  fmovs(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
  fcvtassw(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p23
  eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
  mov(rscratch2, jint_cast(0x1.0p23f));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && abs(src) < 0x1.0p23
    // src may have a fractional part, so add 0.5
    fmovs(ftmp, 0.5f);
    fadds(ftmp, src, ftmp);
    // Convert float to jint, use RoundTowardsNegative
    fcvtmssw(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_float");
}

// get_thread() can be called anywhere inside generated code so we
// need to save whatever non-callee save context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is a usual C function.
//
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs =
    LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
    NOT_LINUX (RegSet::range(r0, r17) + lr - dst);

  protect_return_address();
  push(saved_regs, sp);

  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
  blr(lr);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
  authenticate_return_address();
}

void MacroAssembler::cache_wb(Address line) {
  assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
  assert(line.index() == noreg, "index should be noreg");
  assert(line.offset() == 0, "offset should be 0");
  // would like to assert this
  // assert(line._ext.shift == 0, "shift should be zero");
  if (VM_Version::supports_dcpop()) {
    // writeback using clear virtual address to point of persistence
    dc(Assembler::CVAP, line.base());
  } else {
    // no need to generate anything as Unsafe.writebackMemory should
    // never invoke this stub
  }
}

void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier post sync
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}

void MacroAssembler::verify_sve_vector_length(Register tmp) {
  if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) {
    return;
  }
  // Make sure that native code does not change SVE vector length.
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}

void MacroAssembler::safepoint_isb() {
  isb();
#ifndef PRODUCT
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
#endif
}

#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
#endif

void MacroAssembler::spin_wait() {
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
      case SpinWait::NOP:
        nop();
        break;
      case SpinWait::ISB:
        isb();
        break;
      case SpinWait::YIELD:
        yield();
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

// Stack frame creation/removal

void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return value in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    paciza(return_reg);
  }
}

// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return value in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    paciza(return_reg);
  }
}

// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address() {
  if (VM_Version::use_rop_protection()) {
    autiaz();
    check_return_address();
  }
}

// Authenticate the return value in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autiza(return_reg);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
//
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

#ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used - i.e. when the program returns to the invalid LR. At that point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
#endif

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
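
// For illustration: with 4-byte stack slots, an incoming stack argument
// in slot 0 is read at [rfp, #16], because the 4-slot bias in
// reg2offset_in() steps over the rfp/lr pair that enter() saved at
// [rfp] and [rfp, #8].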

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (AArch64 ABI) even though Java would only store
// 32 bits for a parameter. On 32-bit it would simply be 32 bits.
// So this routine does 32->32 on 32-bit and 32->64 on 64-bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if the oop is null; if it is, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area; may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }
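
  // In either case rHandle now holds either zero (for a null oop) or the
  // address of the slot containing the oop, which is the handle
  // representation the native callee expects for an object argument.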

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to do a float reg to int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}
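
// A quick reference for the mark word lock bits used by the lightweight-
// locking code below (markWord.hpp is the authoritative source):
//
//   0b01  unlocked
//   0b00  fast-locked: the owning thread has the oop on its lock-stack
//   0b10  inflated: the object is associated with an ObjectMonitor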

// Implements lightweight-locking.
//
//  - obj: the object to be locked
//  - t1, t2, t3: temporary registers, will be destroyed
//  - slow: branched to if locking fails; the offset may be larger than 32KB (imm14 encoding)
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds or we need to take the slow-path.
    str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
  }

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(t1, obj);
    ldrb(t1, Address(t1, Klass::misc_flags_offset()));
    tst(t1, KlassFlags::_misc_is_value_based_class);
    br(Assembler::NE, slow);
  }

  // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
//
//  - obj: the object to be unlocked
//  - t1, t2, t3: temporary registers
//  - slow: branched to if unlocking fails; the offset may be larger than 32KB (imm14 encoding)
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif
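
  // Note the asymmetric memory ordering on the two CASes: the locking CAS
  // in lightweight_lock() acquires (acquire = true, release = false),
  // while the unlocking CAS below releases (acquire = false,
  // release = true) -- the usual acquire/release pairing for a lock.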
  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}