/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm19
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                 strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
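// For example (illustration only; the table above is authoritative):
// an unconditional branch `b <label>` encodes 0b000101 in insn[31:26],
// so insn[30:25] is 0b00101x and the switch in RelocActions::run()
// below dispatches it to unconditionalBranch() via the
// 0b001010/0b001011 cases.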
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
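  // adrp encodes the 4k-page delta (target_page - pc_page) as a 21-bit
  // immediate split across insn[30:29] (immlo) and insn[23:5] (immhi),
  // so the offset is split the same way as in adr() above. The low bits
  // of the target within the page are carried by the following
  // ldr/str/add (#imm12) or, for 48-bit targets, a movk for bits 47:32;
  // that is why patching is delegated to the `inner` action first.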
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
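// For example, a 64-bit `ldr x1, [x0, #16]` has size == 0b11 and
// imm12 == 2, so the byte offset recovered below is 2 << 3 == 16.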
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
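// For a movz/movk/movk constant sequence, for instance, the Patcher's
// immediate() action reports 3 instructions, so this returns
// 3 * NativeInstruction::instruction_size == 12 bytes.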
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}
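// For reference, the two sequences handled below are:
//   narrow oop: movz dst, #(n >> 16), lsl #16 ; movk dst, #(n & 0xffff)
//   wide oop:   movz dst, #d[15:0] ; movk dst, #d[31:16], lsl #16 ;
//               movk dst, #d[47:32], lsl #32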
int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & resp of the last Java frame have to
// be recorded in the (thread-local) JavaThread object. When leaving C
// land, the last Java fp has to be reset to 0. This is required to
// allow proper stack traversal.
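//
// The values recorded here live in the JavaFrameAnchor embedded in the
// JavaThread; stack walkers use last_Java_sp/fp/pc to locate the last
// Java frame whenever the top of the stack is executing C code.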
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
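// n.b. the thresholds above follow from branch reach: b/bl take a
// signed 26-bit word offset (+/-128 MB), while the adrp+add far-branch
// sequence used below reaches +/-4 GB.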
void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}
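// call_VM_base is the common tail of all the call_VM flavors below. In
// outline it: (1) records the last Java frame in the thread's anchor,
// (2) passes rthread as the first C argument and calls entry_point,
// (3) resets the anchor, and (4) optionally forwards any pending
// exception and fetches the oop result from the thread.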
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_compiled(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}
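// The size below accounts for the sequence emitted above: isb, a
// 3-instruction movptr-style sequence for the metadata, another
// 3-instruction movptr for the c2i entry, and the final br --
// 8 instructions in total.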
int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}
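// The nop/movk/movk pattern above marks a return address for the
// continuation machinery: the runtime finds it via the post_call_nop
// relocation, and the movk-to-zr instructions are architectural no-ops
// whose immediate fields can later be patched with data.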
// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
    - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

  // Get super_klass value into r0 (even if it was in r5 or r2).
  if (super_klass != r0) {
    mov(r0, super_klass);
  }

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
1571 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1572 // Skip to start of data. 1573 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1574 1575 cmp(sp, zr); // Clear Z flag; SP is never zero 1576 // Scan R2 words at [R5] for an occurrence of R0. 1577 // Set NZ/Z based on last compare. 1578 repne_scan(r5, r0, r2, rscratch1); 1579 1580 // Unspill the temp. registers: 1581 pop(pushed_registers, sp); 1582 1583 br(Assembler::NE, *L_failure); 1584 1585 // Success. Cache the super we found and proceed in triumph. 1586 str(super_klass, super_cache_addr); 1587 1588 if (L_success != &L_fallthrough) { 1589 b(*L_success); 1590 } 1591 1592 #undef IS_A_TEMP 1593 1594 bind(L_fallthrough); 1595 } 1596 1597 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1598 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1599 assert_different_registers(klass, rthread, scratch); 1600 1601 Label L_fallthrough, L_tmp; 1602 if (L_fast_path == nullptr) { 1603 L_fast_path = &L_fallthrough; 1604 } else if (L_slow_path == nullptr) { 1605 L_slow_path = &L_fallthrough; 1606 } 1607 // Fast path check: class is fully initialized 1608 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset())); 1609 subs(zr, scratch, InstanceKlass::fully_initialized); 1610 br(Assembler::EQ, *L_fast_path); 1611 1612 // Fast path check: current thread is initializer thread 1613 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1614 cmp(rthread, scratch); 1615 1616 if (L_slow_path == &L_fallthrough) { 1617 br(Assembler::EQ, *L_fast_path); 1618 bind(*L_slow_path); 1619 } else if (L_fast_path == &L_fallthrough) { 1620 br(Assembler::NE, *L_slow_path); 1621 bind(*L_fast_path); 1622 } else { 1623 Unimplemented(); 1624 } 1625 } 1626 1627 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1628 if (!VerifyOops) return; 1629 1630 // Pass register number to verify_oop_subroutine 1631 const char* b = nullptr; 1632 { 1633 ResourceMark rm; 1634 stringStream ss; 1635 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1636 b = code_string(ss.as_string()); 1637 } 1638 BLOCK_COMMENT("verify_oop {"); 1639 1640 strip_return_address(); // This might happen within a stack frame. 1641 protect_return_address(); 1642 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1643 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1644 1645 mov(r0, reg); 1646 movptr(rscratch1, (uintptr_t)(address)b); 1647 1648 // call indirectly to solve generation ordering problem 1649 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1650 ldr(rscratch2, Address(rscratch2)); 1651 blr(rscratch2); 1652 1653 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1654 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1655 authenticate_return_address(); 1656 1657 BLOCK_COMMENT("} verify_oop"); 1658 } 1659 1660 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1661 if (!VerifyOops) return; 1662 1663 const char* b = nullptr; 1664 { 1665 ResourceMark rm; 1666 stringStream ss; 1667 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1668 b = code_string(ss.as_string()); 1669 } 1670 BLOCK_COMMENT("verify_oop_addr {"); 1671 1672 strip_return_address(); // This might happen within a stack frame. 
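  // (With return-address signing, e.g. -XX:UseBranchProtection=pac-ret,
  // lr may hold a PAC-signed value at this point; stripping and then
  // re-signing it lets the save/restore below round-trip the return
  // address cleanly.)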
1673 protect_return_address(); 1674 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1675 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1676 1677 // addr may contain sp so we will have to adjust it based on the 1678 // pushes that we just did. 1679 if (addr.uses(sp)) { 1680 lea(r0, addr); 1681 ldr(r0, Address(r0, 4 * wordSize)); 1682 } else { 1683 ldr(r0, addr); 1684 } 1685 movptr(rscratch1, (uintptr_t)(address)b); 1686 1687 // call indirectly to solve generation ordering problem 1688 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1689 ldr(rscratch2, Address(rscratch2)); 1690 blr(rscratch2); 1691 1692 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1693 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1694 authenticate_return_address(); 1695 1696 BLOCK_COMMENT("} verify_oop_addr"); 1697 } 1698 1699 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1700 int extra_slot_offset) { 1701 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 1702 int stackElementSize = Interpreter::stackElementSize; 1703 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1704 #ifdef ASSERT 1705 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1706 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1707 #endif 1708 if (arg_slot.is_constant()) { 1709 return Address(esp, arg_slot.as_constant() * stackElementSize 1710 + offset); 1711 } else { 1712 add(rscratch1, esp, arg_slot.as_register(), 1713 ext::uxtx, exact_log2(stackElementSize)); 1714 return Address(rscratch1, offset); 1715 } 1716 } 1717 1718 void MacroAssembler::call_VM_leaf_base(address entry_point, 1719 int number_of_arguments, 1720 Label *retaddr) { 1721 Label not_preempted; 1722 1723 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1724 str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset())); 1725 1726 mov(rscratch1, entry_point); 1727 blr(rscratch1); 1728 if (retaddr) 1729 bind(*retaddr); 1730 1731 if (entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter) || 1732 entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj)) { 1733 ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset())); 1734 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::preempt_alternate_return_offset()) + wordSize)); 1735 cbz(rscratch1, not_preempted); 1736 mov(r4, sp); // r4 is clobbered by VM calls, so free here 1737 cmp(rscratch2, r4); 1738 br(LT, not_preempted); 1739 str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset())); 1740 str(zr, Address(rthread, in_bytes(JavaThread::preempt_alternate_return_offset()) + wordSize)); 1741 br(rscratch1); 1742 } 1743 1744 bind(not_preempted); 1745 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1746 } 1747 1748 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1749 call_VM_leaf_base(entry_point, number_of_arguments); 1750 } 1751 1752 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1753 pass_arg0(this, arg_0); 1754 call_VM_leaf_base(entry_point, 1); 1755 } 1756 1757 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1758 assert_different_registers(arg_1, c_rarg0); 1759 pass_arg0(this, arg_0); 1760 pass_arg1(this, arg_1); 1761 call_VM_leaf_base(entry_point, 2); 1762 } 1763 1764 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 1765 Register arg_1, Register arg_2) { 1766 
assert_different_registers(arg_1, c_rarg0); 1767 assert_different_registers(arg_2, c_rarg0, c_rarg1); 1768 pass_arg0(this, arg_0); 1769 pass_arg1(this, arg_1); 1770 pass_arg2(this, arg_2); 1771 call_VM_leaf_base(entry_point, 3); 1772 } 1773 1774 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1775 pass_arg0(this, arg_0); 1776 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1777 } 1778 1779 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1780 1781 assert_different_registers(arg_0, c_rarg1); 1782 pass_arg1(this, arg_1); 1783 pass_arg0(this, arg_0); 1784 MacroAssembler::call_VM_leaf_base(entry_point, 2); 1785 } 1786 1787 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1788 assert_different_registers(arg_0, c_rarg1, c_rarg2); 1789 assert_different_registers(arg_1, c_rarg2); 1790 pass_arg2(this, arg_2); 1791 pass_arg1(this, arg_1); 1792 pass_arg0(this, arg_0); 1793 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1794 } 1795 1796 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1797 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 1798 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1799 assert_different_registers(arg_2, c_rarg3); 1800 pass_arg3(this, arg_3); 1801 pass_arg2(this, arg_2); 1802 pass_arg1(this, arg_1); 1803 pass_arg0(this, arg_0); 1804 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1805 } 1806 1807 void MacroAssembler::null_check(Register reg, int offset) { 1808 if (needs_explicit_null_check(offset)) { 1809 // provoke OS null exception if reg is null by 1810 // accessing M[reg] w/o changing any registers 1811 // NOTE: this is plenty to provoke a segv 1812 ldr(zr, Address(reg)); 1813 } else { 1814 // nothing to do, (later) access of M[reg + offset] 1815 // will provoke OS null exception if reg is null 1816 } 1817 } 1818 1819 // MacroAssembler protected routines needed to implement 1820 // public methods 1821 1822 void MacroAssembler::mov(Register r, Address dest) { 1823 code_section()->relocate(pc(), dest.rspec()); 1824 uint64_t imm64 = (uint64_t)dest.target(); 1825 movptr(r, imm64); 1826 } 1827 1828 // Move a constant pointer into r. In AArch64 mode the virtual 1829 // address space is 48 bits in size, so we only need three 1830 // instructions to create a patchable instruction sequence that can 1831 // reach anywhere. 1832 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 1833 #ifndef PRODUCT 1834 { 1835 char buffer[64]; 1836 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 1837 block_comment(buffer); 1838 } 1839 #endif 1840 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 1841 movz(r, imm64 & 0xffff); 1842 imm64 >>= 16; 1843 movk(r, imm64 & 0xffff, 16); 1844 imm64 >>= 16; 1845 movk(r, imm64 & 0xffff, 32); 1846 } 1847 1848 // Macro to mov replicated immediate to vector register. 1849 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 1850 // the upper 56/48/32 bits must be zeros for B/H/S type. 
1851 // Vd will get the following values for different arrangements in T 1852 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 1853 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 1854 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 1855 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 1856 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 1857 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 1858 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 1859 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 1860 // Clobbers rscratch1 1861 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 1862 assert(T != T1Q, "unsupported"); 1863 if (T == T1D || T == T2D) { 1864 int imm = operand_valid_for_movi_immediate(imm64, T); 1865 if (-1 != imm) { 1866 movi(Vd, T, imm); 1867 } else { 1868 mov(rscratch1, imm64); 1869 dup(Vd, T, rscratch1); 1870 } 1871 return; 1872 } 1873 1874 #ifdef ASSERT 1875 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 1876 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 1877 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 1878 #endif 1879 int shift = operand_valid_for_movi_immediate(imm64, T); 1880 uint32_t imm32 = imm64 & 0xffffffffULL; 1881 if (shift >= 0) { 1882 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 1883 } else { 1884 movw(rscratch1, imm32); 1885 dup(Vd, T, rscratch1); 1886 } 1887 } 1888 1889 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 1890 { 1891 #ifndef PRODUCT 1892 { 1893 char buffer[64]; 1894 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 1895 block_comment(buffer); 1896 } 1897 #endif 1898 if (operand_valid_for_logical_immediate(false, imm64)) { 1899 orr(dst, zr, imm64); 1900 } else { 1901 // we can use a combination of MOVZ or MOVN with 1902 // MOVK to build up the constant 1903 uint64_t imm_h[4]; 1904 int zero_count = 0; 1905 int neg_count = 0; 1906 int i; 1907 for (i = 0; i < 4; i++) { 1908 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 1909 if (imm_h[i] == 0) { 1910 zero_count++; 1911 } else if (imm_h[i] == 0xffffL) { 1912 neg_count++; 1913 } 1914 } 1915 if (zero_count == 4) { 1916 // one MOVZ will do 1917 movz(dst, 0); 1918 } else if (neg_count == 4) { 1919 // one MOVN will do 1920 movn(dst, 0); 1921 } else if (zero_count == 3) { 1922 for (i = 0; i < 4; i++) { 1923 if (imm_h[i] != 0L) { 1924 movz(dst, (uint32_t)imm_h[i], (i << 4)); 1925 break; 1926 } 1927 } 1928 } else if (neg_count == 3) { 1929 // one MOVN will do 1930 for (int i = 0; i < 4; i++) { 1931 if (imm_h[i] != 0xffffL) { 1932 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1933 break; 1934 } 1935 } 1936 } else if (zero_count == 2) { 1937 // one MOVZ and one MOVK will do 1938 for (i = 0; i < 3; i++) { 1939 if (imm_h[i] != 0L) { 1940 movz(dst, (uint32_t)imm_h[i], (i << 4)); 1941 i++; 1942 break; 1943 } 1944 } 1945 for (;i < 4; i++) { 1946 if (imm_h[i] != 0L) { 1947 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1948 } 1949 } 1950 } else if (neg_count == 2) { 1951 // one MOVN and one MOVK will do 1952 for (i = 0; i < 4; i++) { 1953 if (imm_h[i] != 0xffffL) { 1954 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1955 i++; 1956 break; 1957 } 1958 } 1959 for (;i < 4; i++) { 1960 if (imm_h[i] != 0xffffL) { 1961 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1962 } 1963 } 1964 } else if (zero_count == 1) { 1965 // one MOVZ and two MOVKs will do 1966 for (i = 
0; i < 4; i++) { 1967 if (imm_h[i] != 0L) { 1968 movz(dst, (uint32_t)imm_h[i], (i << 4)); 1969 i++; 1970 break; 1971 } 1972 } 1973 for (;i < 4; i++) { 1974 if (imm_h[i] != 0x0L) { 1975 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1976 } 1977 } 1978 } else if (neg_count == 1) { 1979 // one MOVN and two MOVKs will do 1980 for (i = 0; i < 4; i++) { 1981 if (imm_h[i] != 0xffffL) { 1982 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1983 i++; 1984 break; 1985 } 1986 } 1987 for (;i < 4; i++) { 1988 if (imm_h[i] != 0xffffL) { 1989 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1990 } 1991 } 1992 } else { 1993 // use a MOVZ and 3 MOVKs (makes it easier to debug) 1994 movz(dst, (uint32_t)imm_h[0], 0); 1995 for (i = 1; i < 4; i++) { 1996 movk(dst, (uint32_t)imm_h[i], (i << 4)); 1997 } 1998 } 1999 } 2000 } 2001 2002 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2003 { 2004 #ifndef PRODUCT 2005 { 2006 char buffer[64]; 2007 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2008 block_comment(buffer); 2009 } 2010 #endif 2011 if (operand_valid_for_logical_immediate(true, imm32)) { 2012 orrw(dst, zr, imm32); 2013 } else { 2014 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2015 // constant 2016 uint32_t imm_h[2]; 2017 imm_h[0] = imm32 & 0xffff; 2018 imm_h[1] = ((imm32 >> 16) & 0xffff); 2019 if (imm_h[0] == 0) { 2020 movzw(dst, imm_h[1], 16); 2021 } else if (imm_h[0] == 0xffff) { 2022 movnw(dst, imm_h[1] ^ 0xffff, 16); 2023 } else if (imm_h[1] == 0) { 2024 movzw(dst, imm_h[0], 0); 2025 } else if (imm_h[1] == 0xffff) { 2026 movnw(dst, imm_h[0] ^ 0xffff, 0); 2027 } else { 2028 // use a MOVZ and MOVK (makes it easier to debug) 2029 movzw(dst, imm_h[0], 0); 2030 movkw(dst, imm_h[1], 16); 2031 } 2032 } 2033 } 2034 2035 // Form an address from base + offset in Rd. Rd may or may 2036 // not actually be used: you must use the Address that is returned. 2037 // It is up to you to ensure that the shift provided matches the size 2038 // of your data. 2039 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2040 if (Address::offset_ok_for_immed(byte_offset, shift)) 2041 // It fits; no need for any heroics 2042 return Address(base, byte_offset); 2043 2044 // Don't do anything clever with negative or misaligned offsets 2045 unsigned mask = (1 << shift) - 1; 2046 if (byte_offset < 0 || byte_offset & mask) { 2047 mov(Rd, byte_offset); 2048 add(Rd, base, Rd); 2049 return Address(Rd); 2050 } 2051 2052 // See if we can do this with two 12-bit offsets 2053 { 2054 uint64_t word_offset = byte_offset >> shift; 2055 uint64_t masked_offset = word_offset & 0xfff000; 2056 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2057 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2058 add(Rd, base, masked_offset << shift); 2059 word_offset -= masked_offset; 2060 return Address(Rd, word_offset << shift); 2061 } 2062 } 2063 2064 // Do it the hard way 2065 mov(Rd, byte_offset); 2066 add(Rd, base, Rd); 2067 return Address(Rd); 2068 } 2069 2070 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2071 bool want_remainder, Register scratch) 2072 { 2073 // Full implementation of Java idiv and irem. The function 2074 // returns the (pc) offset of the div instruction - may be needed 2075 // for implicit exceptions. 
2076 // 2077 // constraint : ra/rb =/= scratch 2078 // normal case 2079 // 2080 // input : ra: dividend 2081 // rb: divisor 2082 // 2083 // result: either 2084 // quotient (= ra idiv rb) 2085 // remainder (= ra irem rb) 2086 2087 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2088 2089 int idivl_offset = offset(); 2090 if (! want_remainder) { 2091 sdivw(result, ra, rb); 2092 } else { 2093 sdivw(scratch, ra, rb); 2094 Assembler::msubw(result, scratch, rb, ra); 2095 } 2096 2097 return idivl_offset; 2098 } 2099 2100 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2101 bool want_remainder, Register scratch) 2102 { 2103 // Full implementation of Java ldiv and lrem. The function 2104 // returns the (pc) offset of the div instruction - may be needed 2105 // for implicit exceptions. 2106 // 2107 // constraint : ra/rb =/= scratch 2108 // normal case 2109 // 2110 // input : ra: dividend 2111 // rb: divisor 2112 // 2113 // result: either 2114 // quotient (= ra idiv rb) 2115 // remainder (= ra irem rb) 2116 2117 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2118 2119 int idivq_offset = offset(); 2120 if (! want_remainder) { 2121 sdiv(result, ra, rb); 2122 } else { 2123 sdiv(scratch, ra, rb); 2124 Assembler::msub(result, scratch, rb, ra); 2125 } 2126 2127 return idivq_offset; 2128 } 2129 2130 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2131 address prev = pc() - NativeMembar::instruction_size; 2132 address last = code()->last_insn(); 2133 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2134 NativeMembar *bar = NativeMembar_at(prev); 2135 // We are merging two memory barrier instructions. On AArch64 we 2136 // can do this simply by ORing them together. 2137 bar->set_kind(bar->get_kind() | order_constraint); 2138 BLOCK_COMMENT("merged membar"); 2139 } else { 2140 code()->set_last_insn(pc()); 2141 dmb(Assembler::barrier(order_constraint)); 2142 } 2143 } 2144 2145 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 2146 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 2147 merge_ldst(rt, adr, size_in_bytes, is_store); 2148 code()->clear_last_insn(); 2149 return true; 2150 } else { 2151 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 2152 const uint64_t mask = size_in_bytes - 1; 2153 if (adr.getMode() == Address::base_plus_offset && 2154 (adr.offset() & mask) == 0) { // only supports base_plus_offset. 2155 code()->set_last_insn(pc()); 2156 } 2157 return false; 2158 } 2159 } 2160 2161 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2162 // We always try to merge two adjacent loads into one ldp. 2163 if (!try_merge_ldst(Rx, adr, 8, false)) { 2164 Assembler::ldr(Rx, adr); 2165 } 2166 } 2167 2168 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2169 // We always try to merge two adjacent loads into one ldp. 2170 if (!try_merge_ldst(Rw, adr, 4, false)) { 2171 Assembler::ldrw(Rw, adr); 2172 } 2173 } 2174 2175 void MacroAssembler::str(Register Rx, const Address &adr) { 2176 // We always try to merge two adjacent stores into one stp. 2177 if (!try_merge_ldst(Rx, adr, 8, true)) { 2178 Assembler::str(Rx, adr); 2179 } 2180 } 2181 2182 void MacroAssembler::strw(Register Rw, const Address &adr) { 2183 // We always try to merge two adjacent stores into one stp. 
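  // For example (a sketch of the merge below): emitting
  //   str(r10, Address(sp, 16));
  //   str(r11, Address(sp, 24));
  // rewinds the first str and emits a single "stp r10, r11, [sp, #16]",
  // provided ldst_can_merge() accepts the pair (same size and base,
  // contiguous offsets in stp range, suitable alignment).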
2184 if (!try_merge_ldst(Rw, adr, 4, true)) { 2185 Assembler::strw(Rw, adr); 2186 } 2187 } 2188 2189 // MacroAssembler routines found actually to be needed 2190 2191 void MacroAssembler::push(Register src) 2192 { 2193 str(src, Address(pre(esp, -1 * wordSize))); 2194 } 2195 2196 void MacroAssembler::pop(Register dst) 2197 { 2198 ldr(dst, Address(post(esp, 1 * wordSize))); 2199 } 2200 2201 // Note: load_unsigned_short used to be called load_unsigned_word. 2202 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2203 int off = offset(); 2204 ldrh(dst, src); 2205 return off; 2206 } 2207 2208 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2209 int off = offset(); 2210 ldrb(dst, src); 2211 return off; 2212 } 2213 2214 int MacroAssembler::load_signed_short(Register dst, Address src) { 2215 int off = offset(); 2216 ldrsh(dst, src); 2217 return off; 2218 } 2219 2220 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2221 int off = offset(); 2222 ldrsb(dst, src); 2223 return off; 2224 } 2225 2226 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2227 int off = offset(); 2228 ldrshw(dst, src); 2229 return off; 2230 } 2231 2232 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2233 int off = offset(); 2234 ldrsbw(dst, src); 2235 return off; 2236 } 2237 2238 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2239 switch (size_in_bytes) { 2240 case 8: ldr(dst, src); break; 2241 case 4: ldrw(dst, src); break; 2242 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2243 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2244 default: ShouldNotReachHere(); 2245 } 2246 } 2247 2248 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2249 switch (size_in_bytes) { 2250 case 8: str(src, dst); break; 2251 case 4: strw(src, dst); break; 2252 case 2: strh(src, dst); break; 2253 case 1: strb(src, dst); break; 2254 default: ShouldNotReachHere(); 2255 } 2256 } 2257 2258 void MacroAssembler::decrementw(Register reg, int value) 2259 { 2260 if (value < 0) { incrementw(reg, -value); return; } 2261 if (value == 0) { return; } 2262 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2263 /* else */ { 2264 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2265 movw(rscratch2, (unsigned)value); 2266 subw(reg, reg, rscratch2); 2267 } 2268 } 2269 2270 void MacroAssembler::decrement(Register reg, int value) 2271 { 2272 if (value < 0) { increment(reg, -value); return; } 2273 if (value == 0) { return; } 2274 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2275 /* else */ { 2276 assert(reg != rscratch2, "invalid dst for register decrement"); 2277 mov(rscratch2, (uint64_t)value); 2278 sub(reg, reg, rscratch2); 2279 } 2280 } 2281 2282 void MacroAssembler::decrementw(Address dst, int value) 2283 { 2284 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2285 if (dst.getMode() == Address::literal) { 2286 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2287 lea(rscratch2, dst); 2288 dst = Address(rscratch2); 2289 } 2290 ldrw(rscratch1, dst); 2291 decrementw(rscratch1, value); 2292 strw(rscratch1, dst); 2293 } 2294 2295 void MacroAssembler::decrement(Address dst, int value) 2296 { 2297 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2298 if (dst.getMode() == Address::literal) { 2299 
assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2300 lea(rscratch2, dst); 2301 dst = Address(rscratch2); 2302 } 2303 ldr(rscratch1, dst); 2304 decrement(rscratch1, value); 2305 str(rscratch1, dst); 2306 } 2307 2308 void MacroAssembler::incrementw(Register reg, int value) 2309 { 2310 if (value < 0) { decrementw(reg, -value); return; } 2311 if (value == 0) { return; } 2312 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2313 /* else */ { 2314 assert(reg != rscratch2, "invalid dst for register increment"); 2315 movw(rscratch2, (unsigned)value); 2316 addw(reg, reg, rscratch2); 2317 } 2318 } 2319 2320 void MacroAssembler::increment(Register reg, int value) 2321 { 2322 if (value < 0) { decrement(reg, -value); return; } 2323 if (value == 0) { return; } 2324 if (value < (1 << 12)) { add(reg, reg, value); return; } 2325 /* else */ { 2326 assert(reg != rscratch2, "invalid dst for register increment"); 2327 movw(rscratch2, (unsigned)value); 2328 add(reg, reg, rscratch2); 2329 } 2330 } 2331 2332 void MacroAssembler::incrementw(Address dst, int value) 2333 { 2334 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2335 if (dst.getMode() == Address::literal) { 2336 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2337 lea(rscratch2, dst); 2338 dst = Address(rscratch2); 2339 } 2340 ldrw(rscratch1, dst); 2341 incrementw(rscratch1, value); 2342 strw(rscratch1, dst); 2343 } 2344 2345 void MacroAssembler::increment(Address dst, int value) 2346 { 2347 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2348 if (dst.getMode() == Address::literal) { 2349 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2350 lea(rscratch2, dst); 2351 dst = Address(rscratch2); 2352 } 2353 ldr(rscratch1, dst); 2354 increment(rscratch1, value); 2355 str(rscratch1, dst); 2356 } 2357 2358 // Push lots of registers in the bit set supplied. Don't push sp. 
2359 // Return the number of words pushed 2360 int MacroAssembler::push(unsigned int bitset, Register stack) { 2361 int words_pushed = 0; 2362 2363 // Scan bitset to accumulate register pairs 2364 unsigned char regs[32]; 2365 int count = 0; 2366 for (int reg = 0; reg <= 30; reg++) { 2367 if (1 & bitset) 2368 regs[count++] = reg; 2369 bitset >>= 1; 2370 } 2371 regs[count++] = zr->raw_encoding(); 2372 count &= ~1; // Only push an even number of regs 2373 2374 if (count) { 2375 stp(as_Register(regs[0]), as_Register(regs[1]), 2376 Address(pre(stack, -count * wordSize))); 2377 words_pushed += 2; 2378 } 2379 for (int i = 2; i < count; i += 2) { 2380 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2381 Address(stack, i * wordSize)); 2382 words_pushed += 2; 2383 } 2384 2385 assert(words_pushed == count, "oops, pushed != count"); 2386 2387 return count; 2388 } 2389 2390 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2391 int words_pushed = 0; 2392 2393 // Scan bitset to accumulate register pairs 2394 unsigned char regs[32]; 2395 int count = 0; 2396 for (int reg = 0; reg <= 30; reg++) { 2397 if (1 & bitset) 2398 regs[count++] = reg; 2399 bitset >>= 1; 2400 } 2401 regs[count++] = zr->raw_encoding(); 2402 count &= ~1; 2403 2404 for (int i = 2; i < count; i += 2) { 2405 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2406 Address(stack, i * wordSize)); 2407 words_pushed += 2; 2408 } 2409 if (count) { 2410 ldp(as_Register(regs[0]), as_Register(regs[1]), 2411 Address(post(stack, count * wordSize))); 2412 words_pushed += 2; 2413 } 2414 2415 assert(words_pushed == count, "oops, pushed != count"); 2416 2417 return count; 2418 } 2419 2420 // Push lots of registers in the bit set supplied. Don't push sp. 2421 // Return the number of dwords pushed 2422 int MacroAssembler::push_fp(unsigned int bitset, Register stack) { 2423 int words_pushed = 0; 2424 bool use_sve = false; 2425 int sve_vector_size_in_bytes = 0; 2426 2427 #ifdef COMPILER2 2428 use_sve = Matcher::supports_scalable_vector(); 2429 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2430 #endif 2431 2432 // Scan bitset to accumulate register pairs 2433 unsigned char regs[32]; 2434 int count = 0; 2435 for (int reg = 0; reg <= 31; reg++) { 2436 if (1 & bitset) 2437 regs[count++] = reg; 2438 bitset >>= 1; 2439 } 2440 2441 if (count == 0) { 2442 return 0; 2443 } 2444 2445 // SVE 2446 if (use_sve && sve_vector_size_in_bytes > 16) { 2447 sub(stack, stack, sve_vector_size_in_bytes * count); 2448 for (int i = 0; i < count; i++) { 2449 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2450 } 2451 return count * sve_vector_size_in_bytes / 8; 2452 } 2453 2454 // NEON 2455 if (count == 1) { 2456 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2457 return 2; 2458 } 2459 2460 bool odd = (count & 1) == 1; 2461 int push_slots = count + (odd ? 1 : 0); 2462 2463 // Always pushing full 128 bit registers. 
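  // For example (sketch): pushing {v0, v1, v2} (count == 3, odd)
  // reserves four 16-byte slots,
  //   stp q0, q1, [sp, #-64]!
  //   str q2, [sp, #32]
  // leaves the fourth slot as padding so pushes stay paired, and
  // reports 6 dwords pushed.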
2464 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2465 words_pushed += 2; 2466 2467 for (int i = 2; i + 1 < count; i += 2) { 2468 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2469 words_pushed += 2; 2470 } 2471 2472 if (odd) { 2473 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2474 words_pushed++; 2475 } 2476 2477 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2478 return count * 2; 2479 } 2480 2481 // Return the number of dwords popped 2482 int MacroAssembler::pop_fp(unsigned int bitset, Register stack) { 2483 int words_pushed = 0; 2484 bool use_sve = false; 2485 int sve_vector_size_in_bytes = 0; 2486 2487 #ifdef COMPILER2 2488 use_sve = Matcher::supports_scalable_vector(); 2489 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2490 #endif 2491 // Scan bitset to accumulate register pairs 2492 unsigned char regs[32]; 2493 int count = 0; 2494 for (int reg = 0; reg <= 31; reg++) { 2495 if (1 & bitset) 2496 regs[count++] = reg; 2497 bitset >>= 1; 2498 } 2499 2500 if (count == 0) { 2501 return 0; 2502 } 2503 2504 // SVE 2505 if (use_sve && sve_vector_size_in_bytes > 16) { 2506 for (int i = count - 1; i >= 0; i--) { 2507 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2508 } 2509 add(stack, stack, sve_vector_size_in_bytes * count); 2510 return count * sve_vector_size_in_bytes / 8; 2511 } 2512 2513 // NEON 2514 if (count == 1) { 2515 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2516 return 2; 2517 } 2518 2519 bool odd = (count & 1) == 1; 2520 int push_slots = count + (odd ? 1 : 0); 2521 2522 if (odd) { 2523 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2524 words_pushed++; 2525 } 2526 2527 for (int i = 2; i + 1 < count; i += 2) { 2528 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2529 words_pushed += 2; 2530 } 2531 2532 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2533 words_pushed += 2; 2534 2535 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2536 2537 return count * 2; 2538 } 2539 2540 // Return the number of dwords pushed 2541 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2542 bool use_sve = false; 2543 int sve_predicate_size_in_slots = 0; 2544 2545 #ifdef COMPILER2 2546 use_sve = Matcher::supports_scalable_vector(); 2547 if (use_sve) { 2548 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2549 } 2550 #endif 2551 2552 if (!use_sve) { 2553 return 0; 2554 } 2555 2556 unsigned char regs[PRegister::number_of_registers]; 2557 int count = 0; 2558 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2559 if (1 & bitset) 2560 regs[count++] = reg; 2561 bitset >>= 1; 2562 } 2563 2564 if (count == 0) { 2565 return 0; 2566 } 2567 2568 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2569 VMRegImpl::stack_slot_size * count, 16); 2570 sub(stack, stack, total_push_bytes); 2571 for (int i = 0; i < count; i++) { 2572 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2573 } 2574 return total_push_bytes / 8; 2575 } 2576 2577 // Return the number of dwords popped 2578 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2579 bool use_sve = false; 2580 int sve_predicate_size_in_slots = 0; 2581 2582 #ifdef 
COMPILER2 2583 use_sve = Matcher::supports_scalable_vector(); 2584 if (use_sve) { 2585 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2586 } 2587 #endif 2588 2589 if (!use_sve) { 2590 return 0; 2591 } 2592 2593 unsigned char regs[PRegister::number_of_registers]; 2594 int count = 0; 2595 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2596 if (1 & bitset) 2597 regs[count++] = reg; 2598 bitset >>= 1; 2599 } 2600 2601 if (count == 0) { 2602 return 0; 2603 } 2604 2605 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2606 VMRegImpl::stack_slot_size * count, 16); 2607 for (int i = count - 1; i >= 0; i--) { 2608 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2609 } 2610 add(stack, stack, total_pop_bytes); 2611 return total_pop_bytes / 8; 2612 } 2613 2614 #ifdef ASSERT 2615 void MacroAssembler::verify_heapbase(const char* msg) { 2616 #if 0 2617 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2618 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2619 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2620 // rheapbase is allocated as general register 2621 return; 2622 } 2623 if (CheckCompressedOops) { 2624 Label ok; 2625 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2626 cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2627 br(Assembler::EQ, ok); 2628 stop(msg); 2629 bind(ok); 2630 pop(1 << rscratch1->encoding(), sp); 2631 } 2632 #endif 2633 } 2634 #endif 2635 2636 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 2637 assert_different_registers(value, tmp1, tmp2); 2638 Label done, tagged, weak_tagged; 2639 2640 cbz(value, done); // Use null as-is. 2641 tst(value, JNIHandles::tag_mask); // Test for tag. 2642 br(Assembler::NE, tagged); 2643 2644 // Resolve local handle 2645 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 2646 verify_oop(value); 2647 b(done); 2648 2649 bind(tagged); 2650 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 2651 tbnz(value, 0, weak_tagged); // Test for weak tag. 2652 2653 // Resolve global handle 2654 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 2655 verify_oop(value); 2656 b(done); 2657 2658 bind(weak_tagged); 2659 // Resolve jweak. 2660 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 2661 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 2662 verify_oop(value); 2663 2664 bind(done); 2665 } 2666 2667 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 2668 assert_different_registers(value, tmp1, tmp2); 2669 Label done; 2670 2671 cbz(value, done); // Use null as-is. 
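  // (JNI handle tagging, cf. JNIHandles::TypeTag: local handles are
  // untagged, global handles carry tag 0b10 and weak globals 0b1; the
  // tag is subtracted back out as part of the load address below.)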
2672
2673 #ifdef ASSERT
2674   {
2675     STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
2676     Label valid_global_tag;
2677     tbnz(value, 1, valid_global_tag); // Test for global tag
2678     stop("non global jobject using resolve_global_jobject");
2679     bind(valid_global_tag);
2680   }
2681 #endif
2682
2683   // Resolve global handle
2684   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
2685   verify_oop(value);
2686
2687   bind(done);
2688 }
2689
2690 void MacroAssembler::stop(const char* msg) {
2691   BLOCK_COMMENT(msg);
2692   dcps1(0xdeae);
2693   emit_int64((uintptr_t)msg);
2694 }
2695
2696 void MacroAssembler::unimplemented(const char* what) {
2697   const char* buf = nullptr;
2698   {
2699     ResourceMark rm;
2700     stringStream ss;
2701     ss.print("unimplemented: %s", what);
2702     buf = code_string(ss.as_string());
2703   }
2704   stop(buf);
2705 }
2706
2707 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
2708 #ifdef ASSERT
2709   Label OK;
2710   br(cc, OK);
2711   stop(msg);
2712   bind(OK);
2713 #endif
2714 }
2715
2716 // If a constant does not fit in an immediate field, generate some
2717 // number of MOV instructions and then perform the operation.
2718 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
2719                                            add_sub_imm_insn insn1,
2720                                            add_sub_reg_insn insn2,
2721                                            bool is32) {
2722   assert(Rd != zr, "Rd = zr and not setting flags?");
2723   bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
2724   if (fits) {
2725     (this->*insn1)(Rd, Rn, imm);
2726   } else {
2727     if (uabs(imm) < (1 << 24)) {
2728       (this->*insn1)(Rd, Rn, imm & -(1 << 12));
2729       (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
2730     } else {
2731       assert_different_registers(Rd, Rn);
2732       mov(Rd, imm);
2733       (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2734     }
2735   }
2736 }
2737
2738 // Separate version which sets the flags. Optimisations are more restricted
2739 // because we must set the flags correctly.
2740 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
2741                                              add_sub_imm_insn insn1,
2742                                              add_sub_reg_insn insn2,
2743                                              bool is32) {
2744   bool fits = operand_valid_for_add_sub_immediate(is32 ?
(int32_t)imm : imm); 2745 if (fits) { 2746 (this->*insn1)(Rd, Rn, imm); 2747 } else { 2748 assert_different_registers(Rd, Rn); 2749 assert(Rd != zr, "overflow in immediate operand"); 2750 mov(Rd, imm); 2751 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 2752 } 2753 } 2754 2755 2756 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 2757 if (increment.is_register()) { 2758 add(Rd, Rn, increment.as_register()); 2759 } else { 2760 add(Rd, Rn, increment.as_constant()); 2761 } 2762 } 2763 2764 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 2765 if (increment.is_register()) { 2766 addw(Rd, Rn, increment.as_register()); 2767 } else { 2768 addw(Rd, Rn, increment.as_constant()); 2769 } 2770 } 2771 2772 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 2773 if (decrement.is_register()) { 2774 sub(Rd, Rn, decrement.as_register()); 2775 } else { 2776 sub(Rd, Rn, decrement.as_constant()); 2777 } 2778 } 2779 2780 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 2781 if (decrement.is_register()) { 2782 subw(Rd, Rn, decrement.as_register()); 2783 } else { 2784 subw(Rd, Rn, decrement.as_constant()); 2785 } 2786 } 2787 2788 void MacroAssembler::reinit_heapbase() 2789 { 2790 if (UseCompressedOops) { 2791 if (Universe::is_fully_initialized()) { 2792 mov(rheapbase, CompressedOops::ptrs_base()); 2793 } else { 2794 lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2795 ldr(rheapbase, Address(rheapbase)); 2796 } 2797 } 2798 } 2799 2800 // this simulates the behaviour of the x86 cmpxchg instruction using a 2801 // load linked/store conditional pair. we use the acquire/release 2802 // versions of these instructions so that we flush pending writes as 2803 // per Java semantics. 2804 2805 // n.b the x86 version assumes the old value to be compared against is 2806 // in rax and updates rax with the value located in memory if the 2807 // cmpxchg fails. we supply a register for the old value explicitly 2808 2809 // the aarch64 load linked/store conditional instructions do not 2810 // accept an offset. so, unlike x86, we must provide a plain register 2811 // to identify the memory word to be compared/exchanged rather than a 2812 // register+offset Address. 2813 2814 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 2815 Label &succeed, Label *fail) { 2816 // oldv holds comparison value 2817 // newv holds value to write in exchange 2818 // addr identifies memory word to compare against/update 2819 if (UseLSE) { 2820 mov(tmp, oldv); 2821 casal(Assembler::xword, oldv, newv, addr); 2822 cmp(tmp, oldv); 2823 br(Assembler::EQ, succeed); 2824 membar(AnyAny); 2825 } else { 2826 Label retry_load, nope; 2827 prfm(Address(addr), PSTL1STRM); 2828 bind(retry_load); 2829 // flush and load exclusive from the memory location 2830 // and fail if it is not what we expect 2831 ldaxr(tmp, addr); 2832 cmp(tmp, oldv); 2833 br(Assembler::NE, nope); 2834 // if we store+flush with no intervening write tmp will be zero 2835 stlxr(tmp, newv, addr); 2836 cbzw(tmp, succeed); 2837 // retry so we only ever return after a load fails to compare 2838 // ensures we don't return a stale value after a failed write. 
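    // (A failed stlxr does not distinguish "the word changed" from a
    // lost exclusive reservation, so a failed store always retries; the
    // loop is only left on success or on a failing compare.)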
2839 b(retry_load); 2840 // if the memory word differs we return it in oldv and signal a fail 2841 bind(nope); 2842 membar(AnyAny); 2843 mov(oldv, tmp); 2844 } 2845 if (fail) 2846 b(*fail); 2847 } 2848 2849 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 2850 Label &succeed, Label *fail) { 2851 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 2852 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 2853 } 2854 2855 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 2856 Label &succeed, Label *fail) { 2857 // oldv holds comparison value 2858 // newv holds value to write in exchange 2859 // addr identifies memory word to compare against/update 2860 // tmp returns 0/1 for success/failure 2861 if (UseLSE) { 2862 mov(tmp, oldv); 2863 casal(Assembler::word, oldv, newv, addr); 2864 cmp(tmp, oldv); 2865 br(Assembler::EQ, succeed); 2866 membar(AnyAny); 2867 } else { 2868 Label retry_load, nope; 2869 prfm(Address(addr), PSTL1STRM); 2870 bind(retry_load); 2871 // flush and load exclusive from the memory location 2872 // and fail if it is not what we expect 2873 ldaxrw(tmp, addr); 2874 cmp(tmp, oldv); 2875 br(Assembler::NE, nope); 2876 // if we store+flush with no intervening write tmp will be zero 2877 stlxrw(tmp, newv, addr); 2878 cbzw(tmp, succeed); 2879 // retry so we only ever return after a load fails to compare 2880 // ensures we don't return a stale value after a failed write. 2881 b(retry_load); 2882 // if the memory word differs we return it in oldv and signal a fail 2883 bind(nope); 2884 membar(AnyAny); 2885 mov(oldv, tmp); 2886 } 2887 if (fail) 2888 b(*fail); 2889 } 2890 2891 // A generic CAS; success or failure is in the EQ flag. A weak CAS 2892 // doesn't retry and may fail spuriously. If the oldval is wanted, 2893 // Pass a register for the result, otherwise pass noreg. 2894 2895 // Clobbers rscratch1 2896 void MacroAssembler::cmpxchg(Register addr, Register expected, 2897 Register new_val, 2898 enum operand_size size, 2899 bool acquire, bool release, 2900 bool weak, 2901 Register result) { 2902 if (result == noreg) result = rscratch1; 2903 BLOCK_COMMENT("cmpxchg {"); 2904 if (UseLSE) { 2905 mov(result, expected); 2906 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 2907 compare_eq(result, expected, size); 2908 #ifdef ASSERT 2909 // Poison rscratch1 which is written on !UseLSE branch 2910 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 2911 #endif 2912 } else { 2913 Label retry_load, done; 2914 prfm(Address(addr), PSTL1STRM); 2915 bind(retry_load); 2916 load_exclusive(result, addr, size, acquire); 2917 compare_eq(result, expected, size); 2918 br(Assembler::NE, done); 2919 store_exclusive(rscratch1, new_val, addr, size, release); 2920 if (weak) { 2921 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 2922 } else { 2923 cbnzw(rscratch1, retry_load); 2924 } 2925 bind(done); 2926 } 2927 BLOCK_COMMENT("} cmpxchg"); 2928 } 2929 2930 // A generic comparison. Only compares for equality, clobbers rscratch1. 
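// There is no sub-word cmp, so halfword/byte equality is computed via
// exclusive-or plus a masked test; e.g. for halfword (sketch):
//   eorw rscratch1, rm, rn
//   ands zr, rscratch1, #0xffff   // EQ iff the low 16 bits match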
2931 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 2932 if (size == xword) { 2933 cmp(rm, rn); 2934 } else if (size == word) { 2935 cmpw(rm, rn); 2936 } else if (size == halfword) { 2937 eorw(rscratch1, rm, rn); 2938 ands(zr, rscratch1, 0xffff); 2939 } else if (size == byte) { 2940 eorw(rscratch1, rm, rn); 2941 ands(zr, rscratch1, 0xff); 2942 } else { 2943 ShouldNotReachHere(); 2944 } 2945 } 2946 2947 2948 static bool different(Register a, RegisterOrConstant b, Register c) { 2949 if (b.is_constant()) 2950 return a != c; 2951 else 2952 return a != b.as_register() && a != c && b.as_register() != c; 2953 } 2954 2955 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 2956 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 2957 if (UseLSE) { \ 2958 prev = prev->is_valid() ? prev : zr; \ 2959 if (incr.is_register()) { \ 2960 AOP(sz, incr.as_register(), prev, addr); \ 2961 } else { \ 2962 mov(rscratch2, incr.as_constant()); \ 2963 AOP(sz, rscratch2, prev, addr); \ 2964 } \ 2965 return; \ 2966 } \ 2967 Register result = rscratch2; \ 2968 if (prev->is_valid()) \ 2969 result = different(prev, incr, addr) ? prev : rscratch2; \ 2970 \ 2971 Label retry_load; \ 2972 prfm(Address(addr), PSTL1STRM); \ 2973 bind(retry_load); \ 2974 LDXR(result, addr); \ 2975 OP(rscratch1, result, incr); \ 2976 STXR(rscratch2, rscratch1, addr); \ 2977 cbnzw(rscratch2, retry_load); \ 2978 if (prev->is_valid() && prev != result) { \ 2979 IOP(prev, rscratch1, incr); \ 2980 } \ 2981 } 2982 2983 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 2984 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 2985 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 2986 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 2987 2988 #undef ATOMIC_OP 2989 2990 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 2991 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 2992 if (UseLSE) { \ 2993 prev = prev->is_valid() ? prev : zr; \ 2994 AOP(sz, newv, prev, addr); \ 2995 return; \ 2996 } \ 2997 Register result = rscratch2; \ 2998 if (prev->is_valid()) \ 2999 result = different(prev, newv, addr) ? 
prev : rscratch2;                                       \
3000                                                         \
3001   Label retry_load;                                      \
3002   prfm(Address(addr), PSTL1STRM);                        \
3003   bind(retry_load);                                      \
3004   LDXR(result, addr);                                    \
3005   STXR(rscratch1, newv, addr);                           \
3006   cbnzw(rscratch1, retry_load);                          \
3007   if (prev->is_valid() && prev != result)                \
3008     mov(prev, result);                                   \
3009 }
3010
3011 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3012 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3013 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3014 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3015 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3016 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3017
3018 #undef ATOMIC_XCHG
3019
3020 #ifndef PRODUCT
3021 extern "C" void findpc(intptr_t x);
3022 #endif
3023
3024 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3025 {
3026   // In order to get locks to work, we need to fake an in_VM state
3027   if (ShowMessageBoxOnError) {
3028     JavaThread* thread = JavaThread::current();
3029     JavaThreadState saved_state = thread->thread_state();
3030     thread->set_thread_state(_thread_in_vm);
3031 #ifndef PRODUCT
3032     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3033       ttyLocker ttyl;
3034       BytecodeCounter::print();
3035     }
3036 #endif
3037     if (os::message_box(msg, "Execution stopped, print registers?")) {
3038       ttyLocker ttyl;
3039       tty->print_cr(" pc = 0x%016" PRIx64, pc);
3040 #ifndef PRODUCT
3041       tty->cr();
3042       findpc(pc);
3043       tty->cr();
3044 #endif
3045       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3046       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3047       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3048       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3049       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3050       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3051       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3052       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3053       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3054       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3055       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3056       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3057       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3058       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3059       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3060       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3061       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3062       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3063       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3064       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3065       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3066       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3067       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3068       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3069       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3070       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3071       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3072       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3073       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3074       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3075       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3076       BREAKPOINT;
3077     }
3078   }
3079   fatal("DEBUG MESSAGE: %s", msg);
3080 }
3081
3082 RegSet MacroAssembler::call_clobbered_gp_registers() {
3083   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3084 #ifndef R18_RESERVED
3085   regs += r18_tls;
3086 #endif
3087   return regs;
3088 }
3089
3090 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3091   int step = 4 *
wordSize; 3092 push(call_clobbered_gp_registers() - exclude, sp); 3093 sub(sp, sp, step); 3094 mov(rscratch1, -step); 3095 // Push v0-v7, v16-v31. 3096 for (int i = 31; i>= 4; i -= 4) { 3097 if (i <= v7->encoding() || i >= v16->encoding()) 3098 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3099 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3100 } 3101 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3102 as_FloatRegister(3), T1D, Address(sp)); 3103 } 3104 3105 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3106 for (int i = 0; i < 32; i += 4) { 3107 if (i <= v7->encoding() || i >= v16->encoding()) 3108 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3109 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3110 } 3111 3112 reinitialize_ptrue(); 3113 3114 pop(call_clobbered_gp_registers() - exclude, sp); 3115 } 3116 3117 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3118 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3119 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3120 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3121 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3122 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3123 sve_str(as_FloatRegister(i), Address(sp, i)); 3124 } 3125 } else { 3126 int step = (save_vectors ? 8 : 4) * wordSize; 3127 mov(rscratch1, -step); 3128 sub(sp, sp, step); 3129 for (int i = 28; i >= 4; i -= 4) { 3130 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3131 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3132 } 3133 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3134 } 3135 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3136 sub(sp, sp, total_predicate_in_bytes); 3137 for (int i = 0; i < PRegister::number_of_registers; i++) { 3138 sve_str(as_PRegister(i), Address(sp, i)); 3139 } 3140 } 3141 } 3142 3143 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3144 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3145 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3146 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3147 sve_ldr(as_PRegister(i), Address(sp, i)); 3148 } 3149 add(sp, sp, total_predicate_in_bytes); 3150 } 3151 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3152 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3153 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3154 } 3155 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3156 } else { 3157 int step = (restore_vectors ? 8 : 4) * wordSize; 3158 for (int i = 0; i <= 28; i += 4) 3159 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3160 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3161 } 3162 3163 // We may use predicate registers and rely on ptrue with SVE, 3164 // regardless of wide vector (> 8 bytes) used or not. 3165 if (use_sve) { 3166 reinitialize_ptrue(); 3167 } 3168 3169 // integer registers except lr & sp 3170 pop(RegSet::range(r0, r17), sp); 3171 #ifdef R18_RESERVED 3172 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3173 pop(RegSet::range(r20, r29), sp); 3174 #else 3175 pop(RegSet::range(r18_tls, r29), sp); 3176 #endif 3177 } 3178 3179 /** 3180 * Helpers for multiply_to_len(). 
3181  */
3182 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3183                                      Register src1, Register src2) {
3184   adds(dest_lo, dest_lo, src1);
3185   adc(dest_hi, dest_hi, zr);
3186   adds(dest_lo, dest_lo, src2);
3187   adc(final_dest_hi, dest_hi, zr);
3188 }
3189
3190 // Generate an address from (r + r1 extend offset). "size" is the
3191 // size of the operand. The result may be in rscratch2.
3192 Address MacroAssembler::offsetted_address(Register r, Register r1,
3193                                           Address::extend ext, int offset, int size) {
3194   if (offset || (ext.shift() % size != 0)) {
3195     lea(rscratch2, Address(r, r1, ext));
3196     return Address(rscratch2, offset);
3197   } else {
3198     return Address(r, r1, ext);
3199   }
3200 }
3201
3202 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3203 {
3204   assert(offset >= 0, "spill to negative address?");
3205   // Offset reachable ?
3206   //   Not aligned - 9 bits signed offset
3207   //   Aligned - 12 bits unsigned offset shifted
3208   Register base = sp;
3209   if ((offset & (size-1)) && offset >= (1<<8)) {
3210     add(tmp, base, offset & ((1<<12)-1));
3211     base = tmp;
3212     offset &= -1u<<12;
3213   }
3214
3215   if (offset >= (1<<12) * size) {
3216     add(tmp, base, offset & (((1<<12)-1)<<12));
3217     base = tmp;
3218     offset &= ~(((1<<12)-1)<<12);
3219   }
3220
3221   return Address(base, offset);
3222 }
3223
3224 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3225   assert(offset >= 0, "spill to negative address?");
3226
3227   Register base = sp;
3228
3229   // An immediate offset in the range 0 to 255 which is multiplied
3230   // by the current vector or predicate register size in bytes.
3231   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3232     return Address(base, offset / sve_reg_size_in_bytes);
3233   }
3234
3235   add(tmp, base, offset);
3236   return Address(tmp);
3237 }
3238
3239 // Checks whether offset is aligned.
3240 // Returns true if it is, else false.
3241 bool MacroAssembler::merge_alignment_check(Register base,
3242                                            size_t size,
3243                                            int64_t cur_offset,
3244                                            int64_t prev_offset) const {
3245   if (AvoidUnalignedAccesses) {
3246     if (base == sp) {
3247       // Checks whether the low offset is aligned to a pair of registers.
3248       int64_t pair_mask = size * 2 - 1;
3249       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3250       return (offset & pair_mask) == 0;
3251     } else { // If base is not sp, we can't guarantee the access is aligned.
3252       return false;
3253     }
3254   } else {
3255     int64_t mask = size - 1;
3256     // Load/store pair instructions only support element-size-aligned offsets.
3257     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3258   }
3259 }
3260
3261 // Checks whether current and previous loads/stores can be merged.
3262 // Returns true if it can be merged, else false.
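// For example, "ldrw(r0, Address(sp, 0))" followed by
// "ldrw(r1, Address(sp, 4))" can merge into a single ldpw, whereas
//   ldr x2, [x2, #8]
//   ldr x3, [x2, #16]
// cannot: the first load clobbers the shared base (see the SIGILL note
// in the body below).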
3263 bool MacroAssembler::ldst_can_merge(Register rt,
3264                                     const Address &adr,
3265                                     size_t cur_size_in_bytes,
3266                                     bool is_store) const {
3267   address prev = pc() - NativeInstruction::instruction_size;
3268   address last = code()->last_insn();
3269
3270   if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3271     return false;
3272   }
3273
3274   if (adr.getMode() != Address::base_plus_offset || prev != last) {
3275     return false;
3276   }
3277
3278   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3279   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3280
3281   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3282   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3283
3284   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3285     return false;
3286   }
3287
3288   int64_t max_offset = 63 * prev_size_in_bytes;
3289   int64_t min_offset = -64 * prev_size_in_bytes;
3290
3291   assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
3292
3293   // Only same base can be merged.
3294   if (adr.base() != prev_ldst->base()) {
3295     return false;
3296   }
3297
3298   int64_t cur_offset = adr.offset();
3299   int64_t prev_offset = prev_ldst->offset();
3300   size_t diff = abs(cur_offset - prev_offset);
3301   if (diff != prev_size_in_bytes) {
3302     return false;
3303   }
3304
3305   // The following cases cannot be merged:
3306   //   ldr x2, [x2, #8]
3307   //   ldr x3, [x2, #16]
3308   // or:
3309   //   ldr x2, [x3, #8]
3310   //   ldr x2, [x3, #16]
3311   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3312   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3313     return false;
3314   }
3315
3316   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3317   // Offset range must be in ldp/stp instruction's range.
3318   if (low_offset > max_offset || low_offset < min_offset) {
3319     return false;
3320   }
3321
3322   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3323     return true;
3324   }
3325
3326   return false;
3327 }
3328
3329 // Merge current load/store with previous load/store into ldp/stp.
3330 void MacroAssembler::merge_ldst(Register rt,
3331                                 const Address &adr,
3332                                 size_t cur_size_in_bytes,
3333                                 bool is_store) {
3334
3335   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3336
3337   Register rt_low, rt_high;
3338   address prev = pc() - NativeInstruction::instruction_size;
3339   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3340
3341   int64_t offset;
3342
3343   if (adr.offset() < prev_ldst->offset()) {
3344     offset = adr.offset();
3345     rt_low = rt;
3346     rt_high = prev_ldst->target();
3347   } else {
3348     offset = prev_ldst->offset();
3349     rt_low = prev_ldst->target();
3350     rt_high = rt;
3351   }
3352
3353   Address adr_p = Address(prev_ldst->base(), offset);
3354   // Overwrite the previously generated binary.
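  // Rewinding the code section end to 'prev' drops the single ldr/str
  // that was already emitted; the paired instruction below takes its
  // place at the same position.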
3355 code_section()->set_end(prev); 3356 3357 const size_t sz = prev_ldst->size_in_bytes(); 3358 assert(sz == 8 || sz == 4, "only supports 64/32bit merging."); 3359 if (!is_store) { 3360 BLOCK_COMMENT("merged ldr pair"); 3361 if (sz == 8) { 3362 ldp(rt_low, rt_high, adr_p); 3363 } else { 3364 ldpw(rt_low, rt_high, adr_p); 3365 } 3366 } else { 3367 BLOCK_COMMENT("merged str pair"); 3368 if (sz == 8) { 3369 stp(rt_low, rt_high, adr_p); 3370 } else { 3371 stpw(rt_low, rt_high, adr_p); 3372 } 3373 } 3374 } 3375 3376 /** 3377 * Multiply 64 bit by 64 bit first loop. 3378 */ 3379 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 3380 Register y, Register y_idx, Register z, 3381 Register carry, Register product, 3382 Register idx, Register kdx) { 3383 // 3384 // jlong carry, x[], y[], z[]; 3385 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 3386 // huge_128 product = y[idx] * x[xstart] + carry; 3387 // z[kdx] = (jlong)product; 3388 // carry = (jlong)(product >>> 64); 3389 // } 3390 // z[xstart] = carry; 3391 // 3392 3393 Label L_first_loop, L_first_loop_exit; 3394 Label L_one_x, L_one_y, L_multiply; 3395 3396 subsw(xstart, xstart, 1); 3397 br(Assembler::MI, L_one_x); 3398 3399 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt))); 3400 ldr(x_xstart, Address(rscratch1)); 3401 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian 3402 3403 bind(L_first_loop); 3404 subsw(idx, idx, 1); 3405 br(Assembler::MI, L_first_loop_exit); 3406 subsw(idx, idx, 1); 3407 br(Assembler::MI, L_one_y); 3408 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3409 ldr(y_idx, Address(rscratch1)); 3410 ror(y_idx, y_idx, 32); // convert big-endian to little-endian 3411 bind(L_multiply); 3412 3413 // AArch64 has a multiply-accumulate instruction that we can't use 3414 // here because it has no way to process carries, so we have to use 3415 // separate add and adc instructions. Bah. 3416 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product 3417 mul(product, x_xstart, y_idx); 3418 adds(product, product, carry); 3419 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product 3420 3421 subw(kdx, kdx, 2); 3422 ror(product, product, 32); // back to big-endian 3423 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong)); 3424 3425 b(L_first_loop); 3426 3427 bind(L_one_y); 3428 ldrw(y_idx, Address(y, 0)); 3429 b(L_multiply); 3430 3431 bind(L_one_x); 3432 ldrw(x_xstart, Address(x, 0)); 3433 b(L_first_loop); 3434 3435 bind(L_first_loop_exit); 3436 } 3437 3438 /** 3439 * Multiply 128 bit by 128 bit. Unrolled inner loop.
3440 * 3441 */ 3442 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3443 Register carry, Register carry2, 3444 Register idx, Register jdx, 3445 Register yz_idx1, Register yz_idx2, 3446 Register tmp, Register tmp3, Register tmp4, 3447 Register tmp6, Register product_hi) { 3448 3449 // jlong carry, x[], y[], z[]; 3450 // int kdx = ystart+1; 3451 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3452 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3453 // jlong carry2 = (jlong)(tmp3 >>> 64); 3454 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3455 // carry = (jlong)(tmp4 >>> 64); 3456 // z[kdx+idx+1] = (jlong)tmp3; 3457 // z[kdx+idx] = (jlong)tmp4; 3458 // } 3459 // idx += 2; 3460 // if (idx > 0) { 3461 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3462 // z[kdx+idx] = (jlong)yz_idx1; 3463 // carry = (jlong)(yz_idx1 >>> 64); 3464 // } 3465 // 3466 3467 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3468 3469 lsrw(jdx, idx, 2); 3470 3471 bind(L_third_loop); 3472 3473 subsw(jdx, jdx, 1); 3474 br(Assembler::MI, L_third_loop_exit); 3475 subw(idx, idx, 4); 3476 3477 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3478 3479 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3480 3481 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3482 3483 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3484 ror(yz_idx2, yz_idx2, 32); 3485 3486 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3487 3488 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3489 umulh(tmp4, product_hi, yz_idx1); 3490 3491 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3492 ror(rscratch2, rscratch2, 32); 3493 3494 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3495 umulh(carry2, product_hi, yz_idx2); 3496 3497 // propagate sum of both multiplications into carry:tmp4:tmp3 3498 adds(tmp3, tmp3, carry); 3499 adc(tmp4, tmp4, zr); 3500 adds(tmp3, tmp3, rscratch1); 3501 adcs(tmp4, tmp4, tmp); 3502 adc(carry, carry2, zr); 3503 adds(tmp4, tmp4, rscratch2); 3504 adc(carry, carry, zr); 3505 3506 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3507 ror(tmp4, tmp4, 32); 3508 stp(tmp4, tmp3, Address(tmp6, 0)); 3509 3510 b(L_third_loop); 3511 bind (L_third_loop_exit); 3512 3513 andw (idx, idx, 0x3); 3514 cbz(idx, L_post_third_loop_done); 3515 3516 Label L_check_1; 3517 subsw(idx, idx, 2); 3518 br(Assembler::MI, L_check_1); 3519 3520 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3521 ldr(yz_idx1, Address(rscratch1, 0)); 3522 ror(yz_idx1, yz_idx1, 32); 3523 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3524 umulh(tmp4, product_hi, yz_idx1); 3525 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3526 ldr(yz_idx2, Address(rscratch1, 0)); 3527 ror(yz_idx2, yz_idx2, 32); 3528 3529 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3530 3531 ror(tmp3, tmp3, 32); 3532 str(tmp3, Address(rscratch1, 0)); 3533 3534 bind (L_check_1); 3535 3536 andw (idx, idx, 0x1); 3537 subsw(idx, idx, 1); 3538 br(Assembler::MI, L_post_third_loop_done); 3539 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3540 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3541 umulh(carry2, tmp4, product_hi); 3542 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3543 3544 add2_with_carry(carry2, carry2, tmp3, tmp4, carry); 3545 3546 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3547
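// extr computes carry = bits [95:32] of the register pair carry2:tmp3,
// i.e. the accumulated result shifted right past the 32-bit word that was
// just stored with strw above.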
extr(carry, carry2, tmp3, 32); 3548 3549 bind(L_post_third_loop_done); 3550 } 3551 3552 /** 3553 * Code for BigInteger::multiplyToLen() intrinsic. 3554 * 3555 * r0: x 3556 * r1: xlen 3557 * r2: y 3558 * r3: ylen 3559 * r4: z 3560 * r5: zlen 3561 * r10: tmp1 3562 * r11: tmp2 3563 * r12: tmp3 3564 * r13: tmp4 3565 * r14: tmp5 3566 * r15: tmp6 3567 * r16: tmp7 3568 * 3569 */ 3570 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 3571 Register z, Register zlen, 3572 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 3573 Register tmp5, Register tmp6, Register product_hi) { 3574 3575 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6); 3576 3577 const Register idx = tmp1; 3578 const Register kdx = tmp2; 3579 const Register xstart = tmp3; 3580 3581 const Register y_idx = tmp4; 3582 const Register carry = tmp5; 3583 const Register product = xlen; 3584 const Register x_xstart = zlen; // reuse register 3585 3586 // First Loop. 3587 // 3588 // final static long LONG_MASK = 0xffffffffL; 3589 // int xstart = xlen - 1; 3590 // int ystart = ylen - 1; 3591 // long carry = 0; 3592 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 3593 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 3594 // z[kdx] = (int)product; 3595 // carry = product >>> 32; 3596 // } 3597 // z[xstart] = (int)carry; 3598 // 3599 3600 movw(idx, ylen); // idx = ylen; 3601 movw(kdx, zlen); // kdx = xlen+ylen; 3602 mov(carry, zr); // carry = 0; 3603 3604 Label L_done; 3605 3606 movw(xstart, xlen); 3607 subsw(xstart, xstart, 1); 3608 br(Assembler::MI, L_done); 3609 3610 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 3611 3612 Label L_second_loop; 3613 cbzw(kdx, L_second_loop); 3614 3615 Label L_carry; 3616 subw(kdx, kdx, 1); 3617 cbzw(kdx, L_carry); 3618 3619 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3620 lsr(carry, carry, 32); 3621 subw(kdx, kdx, 1); 3622 3623 bind(L_carry); 3624 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 3625 3626 // Second and third (nested) loops.
3627 // 3628 // for (int i = xstart-1; i >= 0; i--) { // Second loop 3629 // carry = 0; 3630 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 3631 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 3632 // (z[k] & LONG_MASK) + carry; 3633 // z[k] = (int)product; 3634 // carry = product >>> 32; 3635 // } 3636 // z[i] = (int)carry; 3637 // } 3638 // 3639 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 3640 3641 const Register jdx = tmp1; 3642 3643 bind(L_second_loop); 3644 mov(carry, zr); // carry = 0; 3645 movw(jdx, ylen); // j = ystart+1 3646 3647 subsw(xstart, xstart, 1); // i = xstart-1; 3648 br(Assembler::MI, L_done); 3649 3650 str(z, Address(pre(sp, -4 * wordSize))); 3651 3652 Label L_last_x; 3653 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 3654 subsw(xstart, xstart, 1); // i = xstart-1; 3655 br(Assembler::MI, L_last_x); 3656 3657 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 3658 ldr(product_hi, Address(rscratch1)); 3659 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 3660 3661 Label L_third_loop_prologue; 3662 bind(L_third_loop_prologue); 3663 3664 str(ylen, Address(sp, wordSize)); 3665 stp(x, xstart, Address(sp, 2 * wordSize)); 3666 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 3667 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 3668 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 3669 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 3670 3671 addw(tmp3, xlen, 1); 3672 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 3673 subsw(tmp3, tmp3, 1); 3674 br(Assembler::MI, L_done); 3675 3676 lsr(carry, carry, 32); 3677 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 3678 b(L_second_loop); 3679 3680 // The next, infrequently executed code is moved out of the loops. 3681 bind(L_last_x); 3682 ldrw(product_hi, Address(x, 0)); 3683 b(L_third_loop_prologue); 3684 3685 bind(L_done); 3686 } 3687 3688 // Code for BigInteger::mulAdd intrinsic 3689 // out = r0 3690 // in = r1 3691 // offset = r2 (already out.length-offset) 3692 // len = r3 3693 // k = r4 3694 // 3695 // Pseudo code from the Java implementation: 3696 // carry = 0; 3697 // offset = out.length-offset - 1; 3698 // for (int j=len-1; j >= 0; j--) { 3699 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 3700 // out[offset--] = (int)product; 3701 // carry = product >>> 32; 3702 // } 3703 // return (int)carry; 3704 void MacroAssembler::mul_add(Register out, Register in, Register offset, 3705 Register len, Register k) { 3706 Label LOOP, END; 3707 // pre-loop 3708 cmp(len, zr); // cmp, not cbz/cbnz: to use the condition twice => fewer branches 3709 csel(out, zr, out, Assembler::EQ); 3710 br(Assembler::EQ, END); 3711 add(in, in, len, LSL, 2); // in[j+1] address 3712 add(offset, out, offset, LSL, 2); // out[offset + 1] address 3713 mov(out, zr); // used to keep carry now 3714 BIND(LOOP); 3715 ldrw(rscratch1, Address(pre(in, -4))); 3716 madd(rscratch1, rscratch1, k, out); 3717 ldrw(rscratch2, Address(pre(offset, -4))); 3718 add(rscratch1, rscratch1, rscratch2); 3719 strw(rscratch1, Address(offset)); 3720 lsr(out, rscratch1, 32); 3721 subs(len, len, 1); 3722 br(Assembler::NE, LOOP); 3723 BIND(END); 3724 } 3725 3726 /** 3727 * Emits code to update CRC-32 with a byte value according to constants in table 3728 * 3729 * @param [in,out]crc Register containing the crc.
3730 * @param [in]val Register containing the byte to fold into the CRC. 3731 * @param [in]table Register containing the table of crc constants. 3732 * 3733 * uint32_t crc; 3734 * val = crc_table[(val ^ crc) & 0xFF]; 3735 * crc = val ^ (crc >> 8); 3736 * 3737 */ 3738 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 3739 eor(val, val, crc); 3740 andr(val, val, 0xff); 3741 ldrw(val, Address(table, val, Address::lsl(2))); 3742 eor(crc, val, crc, Assembler::LSR, 8); 3743 } 3744 3745 /** 3746 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 3747 * 3748 * @param [in,out]crc Register containing the crc. 3749 * @param [in]v Register containing the 32-bit value to fold into the CRC. 3750 * @param [in]table0 Register containing table 0 of crc constants. 3751 * @param [in]table1 Register containing table 1 of crc constants. 3752 * @param [in]table2 Register containing table 2 of crc constants. 3753 * @param [in]table3 Register containing table 3 of crc constants. 3754 * 3755 * uint32_t crc; 3756 * v = crc ^ v 3757 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 3758 * 3759 */ 3760 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 3761 Register table0, Register table1, Register table2, Register table3, 3762 bool upper) { 3763 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 3764 uxtb(tmp, v); 3765 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 3766 ubfx(tmp, v, 8, 8); 3767 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 3768 eor(crc, crc, tmp); 3769 ubfx(tmp, v, 16, 8); 3770 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 3771 eor(crc, crc, tmp); 3772 ubfx(tmp, v, 24, 8); 3773 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 3774 eor(crc, crc, tmp); 3775 } 3776 3777 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 3778 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 3779 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 3780 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 3781 3782 subs(tmp0, len, 384); 3783 mvnw(crc, crc); 3784 br(Assembler::GE, CRC_by128_pre); 3785 BIND(CRC_less128); 3786 subs(len, len, 32); 3787 br(Assembler::GE, CRC_by32_loop); 3788 BIND(CRC_less32); 3789 adds(len, len, 32 - 4); 3790 br(Assembler::GE, CRC_by4_loop); 3791 adds(len, len, 4); 3792 br(Assembler::GT, CRC_by1_loop); 3793 b(L_exit); 3794 3795 BIND(CRC_by32_loop); 3796 ldp(tmp0, tmp1, Address(buf)); 3797 crc32x(crc, crc, tmp0); 3798 ldp(tmp2, tmp3, Address(buf, 16)); 3799 crc32x(crc, crc, tmp1); 3800 add(buf, buf, 32); 3801 crc32x(crc, crc, tmp2); 3802 subs(len, len, 32); 3803 crc32x(crc, crc, tmp3); 3804 br(Assembler::GE, CRC_by32_loop); 3805 cmn(len, (u1)32); 3806 br(Assembler::NE, CRC_less32); 3807 b(L_exit); 3808 3809 BIND(CRC_by4_loop); 3810 ldrw(tmp0, Address(post(buf, 4))); 3811 subs(len, len, 4); 3812 crc32w(crc, crc, tmp0); 3813 br(Assembler::GE, CRC_by4_loop); 3814 adds(len, len, 4); 3815 br(Assembler::LE, L_exit); 3816 BIND(CRC_by1_loop); 3817 ldrb(tmp0, Address(post(buf, 1))); 3818 subs(len, len, 1); 3819 crc32b(crc, crc, tmp0); 3820 br(Assembler::GT, CRC_by1_loop); 3821 b(L_exit); 3822 3823 BIND(CRC_by128_pre); 3824 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 3825 4*256*sizeof(juint) + 8*sizeof(juint)); 3826 mov(crc, 0); 3827 crc32x(crc, crc, tmp0); 3828 crc32x(crc, crc, tmp1); 3829 3830 cbnz(len, CRC_less128); 3831 3832 BIND(L_exit);
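// Undo the mvnw applied on entry: the CRC is maintained in one's-complement
// (bit-inverted) form while the data is folded in.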
3833 mvnw(crc, crc); 3834 } 3835 3836 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 3837 Register len, Register tmp0, Register tmp1, Register tmp2, 3838 Register tmp3) { 3839 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 3840 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 3841 3842 mvnw(crc, crc); 3843 3844 subs(len, len, 128); 3845 br(Assembler::GE, CRC_by64_pre); 3846 BIND(CRC_less64); 3847 adds(len, len, 128-32); 3848 br(Assembler::GE, CRC_by32_loop); 3849 BIND(CRC_less32); 3850 adds(len, len, 32-4); 3851 br(Assembler::GE, CRC_by4_loop); 3852 adds(len, len, 4); 3853 br(Assembler::GT, CRC_by1_loop); 3854 b(L_exit); 3855 3856 BIND(CRC_by32_loop); 3857 ldp(tmp0, tmp1, Address(post(buf, 16))); 3858 subs(len, len, 32); 3859 crc32x(crc, crc, tmp0); 3860 ldr(tmp2, Address(post(buf, 8))); 3861 crc32x(crc, crc, tmp1); 3862 ldr(tmp3, Address(post(buf, 8))); 3863 crc32x(crc, crc, tmp2); 3864 crc32x(crc, crc, tmp3); 3865 br(Assembler::GE, CRC_by32_loop); 3866 cmn(len, (u1)32); 3867 br(Assembler::NE, CRC_less32); 3868 b(L_exit); 3869 3870 BIND(CRC_by4_loop); 3871 ldrw(tmp0, Address(post(buf, 4))); 3872 subs(len, len, 4); 3873 crc32w(crc, crc, tmp0); 3874 br(Assembler::GE, CRC_by4_loop); 3875 adds(len, len, 4); 3876 br(Assembler::LE, L_exit); 3877 BIND(CRC_by1_loop); 3878 ldrb(tmp0, Address(post(buf, 1))); 3879 subs(len, len, 1); 3880 crc32b(crc, crc, tmp0); 3881 br(Assembler::GT, CRC_by1_loop); 3882 b(L_exit); 3883 3884 BIND(CRC_by64_pre); 3885 sub(buf, buf, 8); 3886 ldp(tmp0, tmp1, Address(buf, 8)); 3887 crc32x(crc, crc, tmp0); 3888 ldr(tmp2, Address(buf, 24)); 3889 crc32x(crc, crc, tmp1); 3890 ldr(tmp3, Address(buf, 32)); 3891 crc32x(crc, crc, tmp2); 3892 ldr(tmp0, Address(buf, 40)); 3893 crc32x(crc, crc, tmp3); 3894 ldr(tmp1, Address(buf, 48)); 3895 crc32x(crc, crc, tmp0); 3896 ldr(tmp2, Address(buf, 56)); 3897 crc32x(crc, crc, tmp1); 3898 ldr(tmp3, Address(pre(buf, 64))); 3899 3900 b(CRC_by64_loop); 3901 3902 align(CodeEntryAlignment); 3903 BIND(CRC_by64_loop); 3904 subs(len, len, 64); 3905 crc32x(crc, crc, tmp2); 3906 ldr(tmp0, Address(buf, 8)); 3907 crc32x(crc, crc, tmp3); 3908 ldr(tmp1, Address(buf, 16)); 3909 crc32x(crc, crc, tmp0); 3910 ldr(tmp2, Address(buf, 24)); 3911 crc32x(crc, crc, tmp1); 3912 ldr(tmp3, Address(buf, 32)); 3913 crc32x(crc, crc, tmp2); 3914 ldr(tmp0, Address(buf, 40)); 3915 crc32x(crc, crc, tmp3); 3916 ldr(tmp1, Address(buf, 48)); 3917 crc32x(crc, crc, tmp0); 3918 ldr(tmp2, Address(buf, 56)); 3919 crc32x(crc, crc, tmp1); 3920 ldr(tmp3, Address(pre(buf, 64))); 3921 br(Assembler::GE, CRC_by64_loop); 3922 3923 // post-loop 3924 crc32x(crc, crc, tmp2); 3925 crc32x(crc, crc, tmp3); 3926 3927 sub(len, len, 64); 3928 add(buf, buf, 8); 3929 cmn(len, (u1)128); 3930 br(Assembler::NE, CRC_less64); 3931 BIND(L_exit); 3932 mvnw(crc, crc); 3933 } 3934 3935 /** 3936 * @param crc register containing existing CRC (32-bit) 3937 * @param buf register pointing to input byte buffer (byte*) 3938 * @param len register containing number of bytes 3939 * @param table register that will contain address of CRC table 3940 * @param tmp scratch register 3941 */ 3942 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 3943 Register table0, Register table1, Register table2, Register table3, 3944 Register tmp, Register tmp2, Register tmp3) { 3945 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 3946 3947 if (UseCryptoPmullForCRC32) { 3948 
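// Carry-less multiply (pmull) path: for long enough buffers (len >= 384
// here) it folds 128 bytes per iteration; the four table registers are
// simply reused as its scratch registers (tmp0..tmp3).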
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 3949 return; 3950 } 3951 3952 if (UseCRC32) { 3953 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 3954 return; 3955 } 3956 3957 mvnw(crc, crc); 3958 3959 { 3960 uint64_t offset; 3961 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 3962 add(table0, table0, offset); 3963 } 3964 add(table1, table0, 1*256*sizeof(juint)); 3965 add(table2, table0, 2*256*sizeof(juint)); 3966 add(table3, table0, 3*256*sizeof(juint)); 3967 3968 { // Neon code start 3969 cmp(len, (u1)64); 3970 br(Assembler::LT, L_by16); 3971 eor(v16, T16B, v16, v16); 3972 3973 Label L_fold; 3974 3975 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 3976 3977 ld1(v0, v1, T2D, post(buf, 32)); 3978 ld1r(v4, T2D, post(tmp, 8)); 3979 ld1r(v5, T2D, post(tmp, 8)); 3980 ld1r(v6, T2D, post(tmp, 8)); 3981 ld1r(v7, T2D, post(tmp, 8)); 3982 mov(v16, S, 0, crc); 3983 3984 eor(v0, T16B, v0, v16); 3985 sub(len, len, 64); 3986 3987 BIND(L_fold); 3988 pmull(v22, T8H, v0, v5, T8B); 3989 pmull(v20, T8H, v0, v7, T8B); 3990 pmull(v23, T8H, v0, v4, T8B); 3991 pmull(v21, T8H, v0, v6, T8B); 3992 3993 pmull2(v18, T8H, v0, v5, T16B); 3994 pmull2(v16, T8H, v0, v7, T16B); 3995 pmull2(v19, T8H, v0, v4, T16B); 3996 pmull2(v17, T8H, v0, v6, T16B); 3997 3998 uzp1(v24, T8H, v20, v22); 3999 uzp2(v25, T8H, v20, v22); 4000 eor(v20, T16B, v24, v25); 4001 4002 uzp1(v26, T8H, v16, v18); 4003 uzp2(v27, T8H, v16, v18); 4004 eor(v16, T16B, v26, v27); 4005 4006 ushll2(v22, T4S, v20, T8H, 8); 4007 ushll(v20, T4S, v20, T4H, 8); 4008 4009 ushll2(v18, T4S, v16, T8H, 8); 4010 ushll(v16, T4S, v16, T4H, 8); 4011 4012 eor(v22, T16B, v23, v22); 4013 eor(v18, T16B, v19, v18); 4014 eor(v20, T16B, v21, v20); 4015 eor(v16, T16B, v17, v16); 4016 4017 uzp1(v17, T2D, v16, v20); 4018 uzp2(v21, T2D, v16, v20); 4019 eor(v17, T16B, v17, v21); 4020 4021 ushll2(v20, T2D, v17, T4S, 16); 4022 ushll(v16, T2D, v17, T2S, 16); 4023 4024 eor(v20, T16B, v20, v22); 4025 eor(v16, T16B, v16, v18); 4026 4027 uzp1(v17, T2D, v20, v16); 4028 uzp2(v21, T2D, v20, v16); 4029 eor(v28, T16B, v17, v21); 4030 4031 pmull(v22, T8H, v1, v5, T8B); 4032 pmull(v20, T8H, v1, v7, T8B); 4033 pmull(v23, T8H, v1, v4, T8B); 4034 pmull(v21, T8H, v1, v6, T8B); 4035 4036 pmull2(v18, T8H, v1, v5, T16B); 4037 pmull2(v16, T8H, v1, v7, T16B); 4038 pmull2(v19, T8H, v1, v4, T16B); 4039 pmull2(v17, T8H, v1, v6, T16B); 4040 4041 ld1(v0, v1, T2D, post(buf, 32)); 4042 4043 uzp1(v24, T8H, v20, v22); 4044 uzp2(v25, T8H, v20, v22); 4045 eor(v20, T16B, v24, v25); 4046 4047 uzp1(v26, T8H, v16, v18); 4048 uzp2(v27, T8H, v16, v18); 4049 eor(v16, T16B, v26, v27); 4050 4051 ushll2(v22, T4S, v20, T8H, 8); 4052 ushll(v20, T4S, v20, T4H, 8); 4053 4054 ushll2(v18, T4S, v16, T8H, 8); 4055 ushll(v16, T4S, v16, T4H, 8); 4056 4057 eor(v22, T16B, v23, v22); 4058 eor(v18, T16B, v19, v18); 4059 eor(v20, T16B, v21, v20); 4060 eor(v16, T16B, v17, v16); 4061 4062 uzp1(v17, T2D, v16, v20); 4063 uzp2(v21, T2D, v16, v20); 4064 eor(v16, T16B, v17, v21); 4065 4066 ushll2(v20, T2D, v16, T4S, 16); 4067 ushll(v16, T2D, v16, T2S, 16); 4068 4069 eor(v20, T16B, v22, v20); 4070 eor(v16, T16B, v16, v18); 4071 4072 uzp1(v17, T2D, v20, v16); 4073 uzp2(v21, T2D, v20, v16); 4074 eor(v20, T16B, v17, v21); 4075 4076 shl(v16, T2D, v28, 1); 4077 shl(v17, T2D, v20, 1); 4078 4079 eor(v0, T16B, v0, v16); 4080 eor(v1, T16B, v1, v17); 4081 4082 subs(len, len, 32); 4083 br(Assembler::GE, L_fold); 4084 4085 mov(crc, 0); 4086 mov(tmp, v0, D, 0); 
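// v0 and v1 still hold 256 bits of folded data; feed each 64-bit lane into
// the scalar CRC as two 32-bit halves (lower, then upper).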
4087 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4088 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4089 mov(tmp, v0, D, 1); 4090 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4091 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4092 mov(tmp, v1, D, 0); 4093 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4094 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4095 mov(tmp, v1, D, 1); 4096 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4097 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4098 4099 add(len, len, 32); 4100 } // Neon code end 4101 4102 BIND(L_by16); 4103 subs(len, len, 16); 4104 br(Assembler::GE, L_by16_loop); 4105 adds(len, len, 16-4); 4106 br(Assembler::GE, L_by4_loop); 4107 adds(len, len, 4); 4108 br(Assembler::GT, L_by1_loop); 4109 b(L_exit); 4110 4111 BIND(L_by4_loop); 4112 ldrw(tmp, Address(post(buf, 4))); 4113 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4114 subs(len, len, 4); 4115 br(Assembler::GE, L_by4_loop); 4116 adds(len, len, 4); 4117 br(Assembler::LE, L_exit); 4118 BIND(L_by1_loop); 4119 subs(len, len, 1); 4120 ldrb(tmp, Address(post(buf, 1))); 4121 update_byte_crc32(crc, tmp, table0); 4122 br(Assembler::GT, L_by1_loop); 4123 b(L_exit); 4124 4125 align(CodeEntryAlignment); 4126 BIND(L_by16_loop); 4127 subs(len, len, 16); 4128 ldp(tmp, tmp3, Address(post(buf, 16))); 4129 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4130 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4131 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4132 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4133 br(Assembler::GE, L_by16_loop); 4134 adds(len, len, 16-4); 4135 br(Assembler::GE, L_by4_loop); 4136 adds(len, len, 4); 4137 br(Assembler::GT, L_by1_loop); 4138 BIND(L_exit); 4139 mvnw(crc, crc); 4140 } 4141 4142 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4143 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4144 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4145 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4146 4147 subs(tmp0, len, 384); 4148 br(Assembler::GE, CRC_by128_pre); 4149 BIND(CRC_less128); 4150 subs(len, len, 32); 4151 br(Assembler::GE, CRC_by32_loop); 4152 BIND(CRC_less32); 4153 adds(len, len, 32 - 4); 4154 br(Assembler::GE, CRC_by4_loop); 4155 adds(len, len, 4); 4156 br(Assembler::GT, CRC_by1_loop); 4157 b(L_exit); 4158 4159 BIND(CRC_by32_loop); 4160 ldp(tmp0, tmp1, Address(buf)); 4161 crc32cx(crc, crc, tmp0); 4162 ldr(tmp2, Address(buf, 16)); 4163 crc32cx(crc, crc, tmp1); 4164 ldr(tmp3, Address(buf, 24)); 4165 crc32cx(crc, crc, tmp2); 4166 add(buf, buf, 32); 4167 subs(len, len, 32); 4168 crc32cx(crc, crc, tmp3); 4169 br(Assembler::GE, CRC_by32_loop); 4170 cmn(len, (u1)32); 4171 br(Assembler::NE, CRC_less32); 4172 b(L_exit); 4173 4174 BIND(CRC_by4_loop); 4175 ldrw(tmp0, Address(post(buf, 4))); 4176 subs(len, len, 4); 4177 crc32cw(crc, crc, tmp0); 4178 br(Assembler::GE, CRC_by4_loop); 4179 adds(len, len, 4); 4180 br(Assembler::LE, L_exit); 4181 BIND(CRC_by1_loop); 4182 ldrb(tmp0, Address(post(buf, 1))); 4183 subs(len, len, 1); 4184 crc32cb(crc, crc, tmp0); 4185 br(Assembler::GT, CRC_by1_loop); 4186 b(L_exit); 4187 
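// Bulk 128-bytes-per-iteration pmull folding, as in the CRC32 variant
// above; the additional 0x50 in the table offset below presumably selects
// the CRC32C (Castagnoli) constant block rather than the plain CRC32 one.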
4188 BIND(CRC_by128_pre); 4189 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4190 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4191 mov(crc, 0); 4192 crc32cx(crc, crc, tmp0); 4193 crc32cx(crc, crc, tmp1); 4194 4195 cbnz(len, CRC_less128); 4196 4197 BIND(L_exit); 4198 } 4199 4200 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4201 Register len, Register tmp0, Register tmp1, Register tmp2, 4202 Register tmp3) { 4203 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4204 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4205 4206 subs(len, len, 128); 4207 br(Assembler::GE, CRC_by64_pre); 4208 BIND(CRC_less64); 4209 adds(len, len, 128-32); 4210 br(Assembler::GE, CRC_by32_loop); 4211 BIND(CRC_less32); 4212 adds(len, len, 32-4); 4213 br(Assembler::GE, CRC_by4_loop); 4214 adds(len, len, 4); 4215 br(Assembler::GT, CRC_by1_loop); 4216 b(L_exit); 4217 4218 BIND(CRC_by32_loop); 4219 ldp(tmp0, tmp1, Address(post(buf, 16))); 4220 subs(len, len, 32); 4221 crc32cx(crc, crc, tmp0); 4222 ldr(tmp2, Address(post(buf, 8))); 4223 crc32cx(crc, crc, tmp1); 4224 ldr(tmp3, Address(post(buf, 8))); 4225 crc32cx(crc, crc, tmp2); 4226 crc32cx(crc, crc, tmp3); 4227 br(Assembler::GE, CRC_by32_loop); 4228 cmn(len, (u1)32); 4229 br(Assembler::NE, CRC_less32); 4230 b(L_exit); 4231 4232 BIND(CRC_by4_loop); 4233 ldrw(tmp0, Address(post(buf, 4))); 4234 subs(len, len, 4); 4235 crc32cw(crc, crc, tmp0); 4236 br(Assembler::GE, CRC_by4_loop); 4237 adds(len, len, 4); 4238 br(Assembler::LE, L_exit); 4239 BIND(CRC_by1_loop); 4240 ldrb(tmp0, Address(post(buf, 1))); 4241 subs(len, len, 1); 4242 crc32cb(crc, crc, tmp0); 4243 br(Assembler::GT, CRC_by1_loop); 4244 b(L_exit); 4245 4246 BIND(CRC_by64_pre); 4247 sub(buf, buf, 8); 4248 ldp(tmp0, tmp1, Address(buf, 8)); 4249 crc32cx(crc, crc, tmp0); 4250 ldr(tmp2, Address(buf, 24)); 4251 crc32cx(crc, crc, tmp1); 4252 ldr(tmp3, Address(buf, 32)); 4253 crc32cx(crc, crc, tmp2); 4254 ldr(tmp0, Address(buf, 40)); 4255 crc32cx(crc, crc, tmp3); 4256 ldr(tmp1, Address(buf, 48)); 4257 crc32cx(crc, crc, tmp0); 4258 ldr(tmp2, Address(buf, 56)); 4259 crc32cx(crc, crc, tmp1); 4260 ldr(tmp3, Address(pre(buf, 64))); 4261 4262 b(CRC_by64_loop); 4263 4264 align(CodeEntryAlignment); 4265 BIND(CRC_by64_loop); 4266 subs(len, len, 64); 4267 crc32cx(crc, crc, tmp2); 4268 ldr(tmp0, Address(buf, 8)); 4269 crc32cx(crc, crc, tmp3); 4270 ldr(tmp1, Address(buf, 16)); 4271 crc32cx(crc, crc, tmp0); 4272 ldr(tmp2, Address(buf, 24)); 4273 crc32cx(crc, crc, tmp1); 4274 ldr(tmp3, Address(buf, 32)); 4275 crc32cx(crc, crc, tmp2); 4276 ldr(tmp0, Address(buf, 40)); 4277 crc32cx(crc, crc, tmp3); 4278 ldr(tmp1, Address(buf, 48)); 4279 crc32cx(crc, crc, tmp0); 4280 ldr(tmp2, Address(buf, 56)); 4281 crc32cx(crc, crc, tmp1); 4282 ldr(tmp3, Address(pre(buf, 64))); 4283 br(Assembler::GE, CRC_by64_loop); 4284 4285 // post-loop 4286 crc32cx(crc, crc, tmp2); 4287 crc32cx(crc, crc, tmp3); 4288 4289 sub(len, len, 64); 4290 add(buf, buf, 8); 4291 cmn(len, (u1)128); 4292 br(Assembler::NE, CRC_less64); 4293 BIND(L_exit); 4294 } 4295 4296 /** 4297 * @param crc register containing existing CRC (32-bit) 4298 * @param buf register pointing to input byte buffer (byte*) 4299 * @param len register containing number of bytes 4300 * @param table register that will contain address of CRC table 4301 * @param tmp scratch register 4302 */ 4303 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4304 Register table0, Register table1, Register table2, Register table3, 4305 Register tmp, Register tmp2, Register tmp3) { 4306 if (UseCryptoPmullForCRC32) { 4307 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4308 } else { 4309 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4310 } 4311 } 4312 4313 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4314 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4315 Label CRC_by128_loop; 4316 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4317 4318 sub(len, len, 256); 4319 Register table = tmp0; 4320 { 4321 uint64_t offset; 4322 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4323 add(table, table, offset); 4324 } 4325 add(table, table, table_offset); 4326 4327 // Registers v0..v7 are used as data registers. 4328 // Registers v16..v31 are used as tmp registers. 4329 sub(buf, buf, 0x10); 4330 ldrq(v0, Address(buf, 0x10)); 4331 ldrq(v1, Address(buf, 0x20)); 4332 ldrq(v2, Address(buf, 0x30)); 4333 ldrq(v3, Address(buf, 0x40)); 4334 ldrq(v4, Address(buf, 0x50)); 4335 ldrq(v5, Address(buf, 0x60)); 4336 ldrq(v6, Address(buf, 0x70)); 4337 ldrq(v7, Address(pre(buf, 0x80))); 4338 4339 movi(v31, T4S, 0); 4340 mov(v31, S, 0, crc); 4341 eor(v0, T16B, v0, v31); 4342 4343 // Register v16 contains constants from the crc table. 4344 ldrq(v16, Address(table)); 4345 b(CRC_by128_loop); 4346 4347 align(OptoLoopAlignment); 4348 BIND(CRC_by128_loop); 4349 pmull (v17, T1Q, v0, v16, T1D); 4350 pmull2(v18, T1Q, v0, v16, T2D); 4351 ldrq(v0, Address(buf, 0x10)); 4352 eor3(v0, T16B, v17, v18, v0); 4353 4354 pmull (v19, T1Q, v1, v16, T1D); 4355 pmull2(v20, T1Q, v1, v16, T2D); 4356 ldrq(v1, Address(buf, 0x20)); 4357 eor3(v1, T16B, v19, v20, v1); 4358 4359 pmull (v21, T1Q, v2, v16, T1D); 4360 pmull2(v22, T1Q, v2, v16, T2D); 4361 ldrq(v2, Address(buf, 0x30)); 4362 eor3(v2, T16B, v21, v22, v2); 4363 4364 pmull (v23, T1Q, v3, v16, T1D); 4365 pmull2(v24, T1Q, v3, v16, T2D); 4366 ldrq(v3, Address(buf, 0x40)); 4367 eor3(v3, T16B, v23, v24, v3); 4368 4369 pmull (v25, T1Q, v4, v16, T1D); 4370 pmull2(v26, T1Q, v4, v16, T2D); 4371 ldrq(v4, Address(buf, 0x50)); 4372 eor3(v4, T16B, v25, v26, v4); 4373 4374 pmull (v27, T1Q, v5, v16, T1D); 4375 pmull2(v28, T1Q, v5, v16, T2D); 4376 ldrq(v5, Address(buf, 0x60)); 4377 eor3(v5, T16B, v27, v28, v5); 4378 4379 pmull (v29, T1Q, v6, v16, T1D); 4380 pmull2(v30, T1Q, v6, v16, T2D); 4381 ldrq(v6, Address(buf, 0x70)); 4382 eor3(v6, T16B, v29, v30, v6); 4383 4384 // Reuse registers v23, v24. 4385 // Using them won't block the first instruction of the next iteration. 4386 pmull (v23, T1Q, v7, v16, T1D); 4387 pmull2(v24, T1Q, v7, v16, T2D); 4388 ldrq(v7, Address(pre(buf, 0x80))); 4389 eor3(v7, T16B, v23, v24, v7); 4390 4391 subs(len, len, 0x80); 4392 br(Assembler::GE, CRC_by128_loop); 4393 4394 // fold into 512 bits 4395 // Use v31 for constants because v16 can still be in use.
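// Fold 1024 -> 512 bits: each of v0..v3 is multiplied (pmull) by the next
// table constant and combined with the corresponding register of v4..v7
// via eor3, halving the working set.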
4396 ldrq(v31, Address(table, 0x10)); 4397 4398 pmull (v17, T1Q, v0, v31, T1D); 4399 pmull2(v18, T1Q, v0, v31, T2D); 4400 eor3(v0, T16B, v17, v18, v4); 4401 4402 pmull (v19, T1Q, v1, v31, T1D); 4403 pmull2(v20, T1Q, v1, v31, T2D); 4404 eor3(v1, T16B, v19, v20, v5); 4405 4406 pmull (v21, T1Q, v2, v31, T1D); 4407 pmull2(v22, T1Q, v2, v31, T2D); 4408 eor3(v2, T16B, v21, v22, v6); 4409 4410 pmull (v23, T1Q, v3, v31, T1D); 4411 pmull2(v24, T1Q, v3, v31, T2D); 4412 eor3(v3, T16B, v23, v24, v7); 4413 4414 // fold into 128 bits 4415 // Use v17 for constants because v31 can still be in use. 4416 ldrq(v17, Address(table, 0x20)); 4417 pmull (v25, T1Q, v0, v17, T1D); 4418 pmull2(v26, T1Q, v0, v17, T2D); 4419 eor3(v3, T16B, v3, v25, v26); 4420 4421 // Use v18 for constants because v17 can still be in use. 4422 ldrq(v18, Address(table, 0x30)); 4423 pmull (v27, T1Q, v1, v18, T1D); 4424 pmull2(v28, T1Q, v1, v18, T2D); 4425 eor3(v3, T16B, v3, v27, v28); 4426 4427 // Use v19 for constants because v18 can still be in use. 4428 ldrq(v19, Address(table, 0x40)); 4429 pmull (v29, T1Q, v2, v19, T1D); 4430 pmull2(v30, T1Q, v2, v19, T2D); 4431 eor3(v0, T16B, v3, v29, v30); 4432 4433 add(len, len, 0x80); 4434 add(buf, buf, 0x10); 4435 4436 mov(tmp0, v0, D, 0); 4437 mov(tmp1, v0, D, 1); 4438 } 4439 4440 SkipIfEqual::SkipIfEqual( 4441 MacroAssembler* masm, const bool* flag_addr, bool value) { 4442 _masm = masm; 4443 uint64_t offset; 4444 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4445 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4446 if (value) { 4447 _masm->cbnzw(rscratch1, _label); 4448 } else { 4449 _masm->cbzw(rscratch1, _label); 4450 } 4451 } 4452 4453 SkipIfEqual::~SkipIfEqual() { 4454 _masm->bind(_label); 4455 } 4456 4457 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4458 Address adr; 4459 switch(dst.getMode()) { 4460 case Address::base_plus_offset: 4461 // This is the expected mode, although we allow all the other 4462 // forms below.
4463 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4464 break; 4465 default: 4466 lea(rscratch2, dst); 4467 adr = Address(rscratch2); 4468 break; 4469 } 4470 ldr(rscratch1, adr); 4471 add(rscratch1, rscratch1, src); 4472 str(rscratch1, adr); 4473 } 4474 4475 void MacroAssembler::cmpptr(Register src1, Address src2) { 4476 uint64_t offset; 4477 adrp(rscratch1, src2, offset); 4478 ldr(rscratch1, Address(rscratch1, offset)); 4479 cmp(src1, rscratch1); 4480 } 4481 4482 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4483 cmp(obj1, obj2); 4484 } 4485 4486 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4487 load_method_holder(rresult, rmethod); 4488 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4489 } 4490 4491 void MacroAssembler::load_method_holder(Register holder, Register method) { 4492 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4493 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4494 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4495 } 4496 4497 void MacroAssembler::load_klass(Register dst, Register src) { 4498 if (UseCompressedClassPointers) { 4499 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4500 decode_klass_not_null(dst); 4501 } else { 4502 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4503 } 4504 } 4505 4506 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 4507 if (RestoreMXCSROnJNICalls) { 4508 Label OK; 4509 get_fpcr(tmp1); 4510 mov(tmp2, tmp1); 4511 // Set FPCR to the state we need. We do want Round to Nearest. We 4512 // don't want non-IEEE rounding modes or floating-point traps. 4513 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 4514 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 4515 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 4516 eor(tmp2, tmp1, tmp2); 4517 cbz(tmp2, OK); // Only reset FPCR if it's wrong 4518 set_fpcr(tmp1); 4519 bind(OK); 4520 } 4521 } 4522 4523 // ((OopHandle)result).resolve(); 4524 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4525 // OopHandle::resolve is an indirection. 4526 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4527 } 4528 4529 // ((WeakHandle)result).resolve(); 4530 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4531 assert_different_registers(result, tmp1, tmp2); 4532 Label resolved; 4533 4534 // A null weak handle resolves to null. 4535 cbz(result, resolved); 4536 4537 // Only 64 bit platforms support GCs that require a tmp register 4538 // WeakHandle::resolve is an indirection like jweak. 
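// ON_PHANTOM_OOP_REF asks the barrier set for the load strength that a
// jweak access requires; the concrete barrier behaviour is GC-specific.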
4539 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4540 result, Address(result), tmp1, tmp2); 4541 bind(resolved); 4542 } 4543 4544 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) { 4545 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 4546 ldr(dst, Address(rmethod, Method::const_offset())); 4547 ldr(dst, Address(dst, ConstMethod::constants_offset())); 4548 ldr(dst, Address(dst, ConstantPool::pool_holder_offset())); 4549 ldr(dst, Address(dst, mirror_offset)); 4550 resolve_oop_handle(dst, tmp1, tmp2); 4551 } 4552 4553 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { 4554 if (UseCompressedClassPointers) { 4555 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4556 if (CompressedKlassPointers::base() == nullptr) { 4557 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); 4558 return; 4559 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 4560 && CompressedKlassPointers::shift() == 0) { 4561 // Only the bottom 32 bits matter 4562 cmpw(trial_klass, tmp); 4563 return; 4564 } 4565 decode_klass_not_null(tmp); 4566 } else { 4567 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 4568 } 4569 cmp(trial_klass, tmp); 4570 } 4571 4572 void MacroAssembler::store_klass(Register dst, Register src) { 4573 // FIXME: Should this be a store release? Concurrent GCs assume the 4574 // klass length is valid if the klass field is not null. 4575 if (UseCompressedClassPointers) { 4576 encode_klass_not_null(src); 4577 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4578 } else { 4579 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 4580 } 4581 } 4582 4583 void MacroAssembler::store_klass_gap(Register dst, Register src) { 4584 if (UseCompressedClassPointers) { 4585 // Store to klass gap in destination 4586 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 4587 } 4588 } 4589 4590 // Algorithm must match CompressedOops::encode. 4591 void MacroAssembler::encode_heap_oop(Register d, Register s) { 4592 #ifdef ASSERT 4593 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 4594 #endif 4595 verify_oop_msg(s, "broken oop in encode_heap_oop"); 4596 if (CompressedOops::base() == nullptr) { 4597 if (CompressedOops::shift() != 0) { 4598 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4599 lsr(d, s, LogMinObjAlignmentInBytes); 4600 } else { 4601 mov(d, s); 4602 } 4603 } else { 4604 subs(d, s, rheapbase); 4605 csel(d, d, zr, Assembler::HS); 4606 lsr(d, d, LogMinObjAlignmentInBytes); 4607 4608 /* Old algorithm: is this any worse?
4609 Label nonnull; 4610 cbnz(r, nonnull); 4611 sub(r, r, rheapbase); 4612 bind(nonnull); 4613 lsr(r, r, LogMinObjAlignmentInBytes); 4614 */ 4615 } 4616 } 4617 4618 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4619 #ifdef ASSERT 4620 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4621 if (CheckCompressedOops) { 4622 Label ok; 4623 cbnz(r, ok); 4624 stop("null oop passed to encode_heap_oop_not_null"); 4625 bind(ok); 4626 } 4627 #endif 4628 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 4629 if (CompressedOops::base() != nullptr) { 4630 sub(r, r, rheapbase); 4631 } 4632 if (CompressedOops::shift() != 0) { 4633 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4634 lsr(r, r, LogMinObjAlignmentInBytes); 4635 } 4636 } 4637 4638 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 4639 #ifdef ASSERT 4640 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 4641 if (CheckCompressedOops) { 4642 Label ok; 4643 cbnz(src, ok); 4644 stop("null oop passed to encode_heap_oop_not_null2"); 4645 bind(ok); 4646 } 4647 #endif 4648 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 4649 4650 Register data = src; 4651 if (CompressedOops::base() != nullptr) { 4652 sub(dst, src, rheapbase); 4653 data = dst; 4654 } 4655 if (CompressedOops::shift() != 0) { 4656 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4657 lsr(dst, data, LogMinObjAlignmentInBytes); 4658 data = dst; 4659 } 4660 if (data == src) 4661 mov(dst, src); 4662 } 4663 4664 void MacroAssembler::decode_heap_oop(Register d, Register s) { 4665 #ifdef ASSERT 4666 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 4667 #endif 4668 if (CompressedOops::base() == nullptr) { 4669 if (CompressedOops::shift() != 0 || d != s) { 4670 lsl(d, s, CompressedOops::shift()); 4671 } 4672 } else { 4673 Label done; 4674 if (d != s) 4675 mov(d, s); 4676 cbz(s, done); 4677 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 4678 bind(done); 4679 } 4680 verify_oop_msg(d, "broken oop in decode_heap_oop"); 4681 } 4682 4683 void MacroAssembler::decode_heap_oop_not_null(Register r) { 4684 assert (UseCompressedOops, "should only be used for compressed headers"); 4685 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4686 // Cannot assert, unverified entry point counts instructions (see .ad file) 4687 // vtableStubs also counts instructions in pd_code_size_limit. 4688 // Also do not verify_oop as this is called by verify_oop. 4689 if (CompressedOops::shift() != 0) { 4690 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4691 if (CompressedOops::base() != nullptr) { 4692 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 4693 } else { 4694 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 4695 } 4696 } else { 4697 assert (CompressedOops::base() == nullptr, "sanity"); 4698 } 4699 } 4700 4701 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 4702 assert (UseCompressedOops, "should only be used for compressed headers"); 4703 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4704 // Cannot assert, unverified entry point counts instructions (see .ad file) 4705 // vtableStubs also counts instructions in pd_code_size_limit. 4706 // Also do not verify_oop as this is called by verify_oop. 
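// What follows computes dst = rheapbase + (src << shift) when a heap base
// is set; with a null base it reduces to a shifted (or plain) copy.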
4707 if (CompressedOops::shift() != 0) { 4708 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4709 if (CompressedOops::base() != nullptr) { 4710 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 4711 } else { 4712 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 4713 } 4714 } else { 4715 assert (CompressedOops::base() == nullptr, "sanity"); 4716 if (dst != src) { 4717 mov(dst, src); 4718 } 4719 } 4720 } 4721 4722 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 4723 4724 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 4725 assert(UseCompressedClassPointers, "not using compressed class pointers"); 4726 assert(Metaspace::initialized(), "metaspace not initialized yet"); 4727 4728 if (_klass_decode_mode != KlassDecodeNone) { 4729 return _klass_decode_mode; 4730 } 4731 4732 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 4733 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 4734 4735 if (CompressedKlassPointers::base() == nullptr) { 4736 return (_klass_decode_mode = KlassDecodeZero); 4737 } 4738 4739 if (operand_valid_for_logical_immediate( 4740 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 4741 const uint64_t range_mask = 4742 (1ULL << log2i(CompressedKlassPointers::range())) - 1; 4743 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 4744 return (_klass_decode_mode = KlassDecodeXor); 4745 } 4746 } 4747 4748 const uint64_t shifted_base = 4749 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 4750 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 4751 "compressed class base bad alignment"); 4752 4753 return (_klass_decode_mode = KlassDecodeMovk); 4754 } 4755 4756 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 4757 switch (klass_decode_mode()) { 4758 case KlassDecodeZero: 4759 if (CompressedKlassPointers::shift() != 0) { 4760 lsr(dst, src, LogKlassAlignmentInBytes); 4761 } else { 4762 if (dst != src) mov(dst, src); 4763 } 4764 break; 4765 4766 case KlassDecodeXor: 4767 if (CompressedKlassPointers::shift() != 0) { 4768 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4769 lsr(dst, dst, LogKlassAlignmentInBytes); 4770 } else { 4771 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4772 } 4773 break; 4774 4775 case KlassDecodeMovk: 4776 if (CompressedKlassPointers::shift() != 0) { 4777 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 4778 } else { 4779 movw(dst, src); 4780 } 4781 break; 4782 4783 case KlassDecodeNone: 4784 ShouldNotReachHere(); 4785 break; 4786 } 4787 } 4788 4789 void MacroAssembler::encode_klass_not_null(Register r) { 4790 encode_klass_not_null(r, r); 4791 } 4792 4793 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 4794 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 4795 4796 switch (klass_decode_mode()) { 4797 case KlassDecodeZero: 4798 if (CompressedKlassPointers::shift() != 0) { 4799 lsl(dst, src, LogKlassAlignmentInBytes); 4800 } else { 4801 if (dst != src) mov(dst, src); 4802 } 4803 break; 4804 4805 case KlassDecodeXor: 4806 if (CompressedKlassPointers::shift() != 0) { 4807 lsl(dst, src, LogKlassAlignmentInBytes); 4808 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 4809 } else { 4810 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4811 } 4812 break; 4813 4814 case KlassDecodeMovk: { 4815 const uint64_t shifted_base = 4816 
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 4817 4818 if (dst != src) movw(dst, src); 4819 movk(dst, shifted_base >> 32, 32); 4820 4821 if (CompressedKlassPointers::shift() != 0) { 4822 lsl(dst, dst, LogKlassAlignmentInBytes); 4823 } 4824 4825 break; 4826 } 4827 4828 case KlassDecodeNone: 4829 ShouldNotReachHere(); 4830 break; 4831 } 4832 } 4833 4834 void MacroAssembler::decode_klass_not_null(Register r) { 4835 decode_klass_not_null(r, r); 4836 } 4837 4838 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 4839 #ifdef ASSERT 4840 { 4841 ThreadInVMfromUnknown tiv; 4842 assert (UseCompressedOops, "should only be used for compressed oops"); 4843 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4844 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4845 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 4846 } 4847 #endif 4848 int oop_index = oop_recorder()->find_index(obj); 4849 InstructionMark im(this); 4850 RelocationHolder rspec = oop_Relocation::spec(oop_index); 4851 code_section()->relocate(inst_mark(), rspec); 4852 movz(dst, 0xDEAD, 16); 4853 movk(dst, 0xBEEF); 4854 } 4855 4856 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 4857 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 4858 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4859 int index = oop_recorder()->find_index(k); 4860 assert(! Universe::heap()->is_in(k), "should not be an oop"); 4861 4862 InstructionMark im(this); 4863 RelocationHolder rspec = metadata_Relocation::spec(index); 4864 code_section()->relocate(inst_mark(), rspec); 4865 narrowKlass nk = CompressedKlassPointers::encode(k); 4866 movz(dst, (nk >> 16), 16); 4867 movk(dst, nk & 0xffff); 4868 } 4869 4870 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 4871 Register dst, Address src, 4872 Register tmp1, Register tmp2) { 4873 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4874 decorators = AccessInternal::decorator_fixup(decorators, type); 4875 bool as_raw = (decorators & AS_RAW) != 0; 4876 if (as_raw) { 4877 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 4878 } else { 4879 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 4880 } 4881 } 4882 4883 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 4884 Address dst, Register val, 4885 Register tmp1, Register tmp2, Register tmp3) { 4886 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4887 decorators = AccessInternal::decorator_fixup(decorators, type); 4888 bool as_raw = (decorators & AS_RAW) != 0; 4889 if (as_raw) { 4890 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 4891 } else { 4892 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 4893 } 4894 } 4895 4896 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 4897 Register tmp2, DecoratorSet decorators) { 4898 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 4899 } 4900 4901 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 4902 Register tmp2, DecoratorSet decorators) { 4903 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 4904 } 4905 4906 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 4907 Register tmp2, Register 
tmp3, DecoratorSet decorators) { 4908 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 4909 } 4910 4911 // Used for storing nulls. 4912 void MacroAssembler::store_heap_oop_null(Address dst) { 4913 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 4914 } 4915 4916 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 4917 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 4918 int index = oop_recorder()->allocate_metadata_index(obj); 4919 RelocationHolder rspec = metadata_Relocation::spec(index); 4920 return Address((address)obj, rspec); 4921 } 4922 4923 // Move an oop into a register. 4924 void MacroAssembler::movoop(Register dst, jobject obj) { 4925 int oop_index; 4926 if (obj == nullptr) { 4927 oop_index = oop_recorder()->allocate_oop_index(obj); 4928 } else { 4929 #ifdef ASSERT 4930 { 4931 ThreadInVMfromUnknown tiv; 4932 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 4933 } 4934 #endif 4935 oop_index = oop_recorder()->find_index(obj); 4936 } 4937 RelocationHolder rspec = oop_Relocation::spec(oop_index); 4938 4939 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 4940 mov(dst, Address((address)obj, rspec)); 4941 } else { 4942 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 4943 ldr_constant(dst, Address(dummy, rspec)); 4944 } 4945 4946 } 4947 4948 // Move a metadata address into a register. 4949 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 4950 int oop_index; 4951 if (obj == nullptr) { 4952 oop_index = oop_recorder()->allocate_metadata_index(obj); 4953 } else { 4954 oop_index = oop_recorder()->find_index(obj); 4955 } 4956 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 4957 mov(dst, Address((address)obj, rspec)); 4958 } 4959 4960 Address MacroAssembler::constant_oop_address(jobject obj) { 4961 #ifdef ASSERT 4962 { 4963 ThreadInVMfromUnknown tiv; 4964 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4965 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 4966 } 4967 #endif 4968 int oop_index = oop_recorder()->find_index(obj); 4969 return Address((address)obj, oop_Relocation::spec(oop_index)); 4970 } 4971 4972 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
4973 void MacroAssembler::tlab_allocate(Register obj, 4974 Register var_size_in_bytes, 4975 int con_size_in_bytes, 4976 Register t1, 4977 Register t2, 4978 Label& slow_case) { 4979 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4980 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 4981 } 4982 4983 void MacroAssembler::inc_held_monitor_count() { 4984 Address dst = Address(rthread, JavaThread::held_monitor_count_offset()); 4985 #ifdef ASSERT 4986 ldr(rscratch2, dst); 4987 increment(rscratch2); 4988 str(rscratch2, dst); 4989 Label ok; 4990 tbz(rscratch2, 63, ok); 4991 STOP("assert(held monitor count underflow)"); 4992 should_not_reach_here(); 4993 bind(ok); 4994 #else 4995 increment(dst); 4996 #endif 4997 } 4998 4999 void MacroAssembler::dec_held_monitor_count() { 5000 Address dst = Address(rthread, JavaThread::held_monitor_count_offset()); 5001 #ifdef ASSERT 5002 ldr(rscratch2, dst); 5003 decrement(rscratch2); 5004 str(rscratch2, dst); 5005 Label ok; 5006 tbz(rscratch2, 63, ok); 5007 STOP("assert(held monitor count underflow)"); 5008 should_not_reach_here(); 5009 bind(ok); 5010 #else 5011 decrement(dst); 5012 #endif 5013 } 5014 5015 void MacroAssembler::verify_tlab() { 5016 #ifdef ASSERT 5017 if (UseTLAB && VerifyOops) { 5018 Label next, ok; 5019 5020 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5021 5022 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5023 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5024 cmp(rscratch2, rscratch1); 5025 br(Assembler::HS, next); 5026 STOP("assert(top >= start)"); 5027 should_not_reach_here(); 5028 5029 bind(next); 5030 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5031 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5032 cmp(rscratch2, rscratch1); 5033 br(Assembler::HS, ok); 5034 STOP("assert(top <= end)"); 5035 should_not_reach_here(); 5036 5037 bind(ok); 5038 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5039 } 5040 #endif 5041 } 5042 5043 // Writes successive pages to the stack until the given offset is reached, to 5044 // check for stack overflow + shadow pages. This clobbers tmp. 5045 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5046 assert_different_registers(tmp, size, rscratch1); 5047 mov(tmp, sp); 5048 // Bang stack for total size given plus shadow page size. 5049 // Bang one page at a time because large size can bang beyond yellow and 5050 // red zones. 5051 Label loop; 5052 mov(rscratch1, (int)os::vm_page_size()); 5053 bind(loop); 5054 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5055 subsw(size, size, rscratch1); 5056 str(size, Address(tmp)); 5057 br(Assembler::GT, loop); 5058 5059 // Bang down shadow pages too. 5060 // At this point, (tmp-0) is the last address touched, so don't 5061 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5062 // was post-decremented.) Skip this address by starting at i=1, and 5063 // touch a few more pages below. N.B. It is important to touch all 5064 // the way down to and including i=StackShadowPages. 5065 for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) { 5066 // This could be any sized move, but since it can serve as a debugging 5067 // crumb, the bigger the better. 5068 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5069 str(size, Address(tmp)); 5070 } 5071 } 5072 5073 // Move the address of the polling page into dest.
5074 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) { 5075 ldr(dest, Address(rthread, JavaThread::polling_page_offset())); 5076 } 5077 5078 // Read the polling page. The address of the polling page must 5079 // already be in r. 5080 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5081 address mark; 5082 { 5083 InstructionMark im(this); 5084 code_section()->relocate(inst_mark(), rtype); 5085 ldrw(zr, Address(r, 0)); 5086 mark = inst_mark(); 5087 } 5088 verify_cross_modify_fence_not_required(); 5089 return mark; 5090 } 5091 5092 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5093 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5094 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5095 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5096 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5097 int64_t offset_low = dest_page - low_page; 5098 int64_t offset_high = dest_page - high_page; 5099 5100 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5101 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5102 5103 InstructionMark im(this); 5104 code_section()->relocate(inst_mark(), dest.rspec()); 5105 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5106 // the code cache so that if it is relocated we know it will still reach 5107 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5108 _adrp(reg1, dest.target()); 5109 } else { 5110 uint64_t target = (uint64_t)dest.target(); 5111 uint64_t adrp_target 5112 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5113 5114 _adrp(reg1, (address)adrp_target); 5115 movk(reg1, target >> 32, 32); 5116 } 5117 byte_offset = (uint64_t)dest.target() & 0xfff; 5118 } 5119 5120 void MacroAssembler::load_byte_map_base(Register reg) { 5121 CardTable::CardValue* byte_map_base = 5122 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5123 5124 // Strictly speaking the byte_map_base isn't an address at all, and it might 5125 // even be negative. It is thus materialised as a constant. 
5126   mov(reg, (uint64_t)byte_map_base);
5127 }
5128
5129 void MacroAssembler::build_frame(int framesize) {
5130   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5131   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5132   protect_return_address();
5133   if (framesize < ((1 << 9) + 2 * wordSize)) {
5134     sub(sp, sp, framesize);
5135     stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5136     if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
5137   } else {
5138     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5139     if (PreserveFramePointer) mov(rfp, sp);
5140     if (framesize < ((1 << 12) + 2 * wordSize))
5141       sub(sp, sp, framesize - 2 * wordSize);
5142     else {
5143       mov(rscratch1, framesize - 2 * wordSize);
5144       sub(sp, sp, rscratch1);
5145     }
5146   }
5147   verify_cross_modify_fence_not_required();
5148 }
5149
5150 void MacroAssembler::remove_frame(int framesize) {
5151   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5152   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
5153   if (framesize < ((1 << 9) + 2 * wordSize)) {
5154     ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
5155     add(sp, sp, framesize);
5156   } else {
5157     if (framesize < ((1 << 12) + 2 * wordSize))
5158       add(sp, sp, framesize - 2 * wordSize);
5159     else {
5160       mov(rscratch1, framesize - 2 * wordSize);
5161       add(sp, sp, rscratch1);
5162     }
5163     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5164   }
5165   authenticate_return_address();
5166 }
5167
5168
5169 // This method counts leading positive bytes (highest bit not set) in the
5170 // provided byte array.
5171 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5172   // The simple and most common case, a small aligned array that is not at
5173   // the end of a memory page, is handled here; all other cases are handled
5174   // in the stubs.
5175   Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5176   const uint64_t UPPER_BIT_MASK = 0x8080808080808080;
5177   assert_different_registers(ary1, len, result);
5178
5179   mov(result, len);
5180   cmpw(len, 0);
5181   br(LE, DONE);
5182   cmpw(len, 4 * wordSize);
5183   br(GE, STUB_LONG);  // len >= 32 bytes: go to the long stub
5184
5185   int shift = 64 - exact_log2(os::vm_page_size());
5186   lsl(rscratch1, ary1, shift);
5187   mov(rscratch2, (size_t)(4 * wordSize) << shift);
5188   adds(rscratch2, rscratch1, rscratch2); // At end of page?
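  // After the shift, the in-page offset of ary1 occupies the top bits of
  // rscratch1, so the adds above carries out (sets C) exactly when
  // offset_in_page + 32 >= page_size, i.e. when a 32-byte read starting
  // at ary1 could run off the current page.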
5187 br(CS, STUB); // at the end of page then go to stub 5188 subs(len, len, wordSize); 5189 br(LT, END); 5190 5191 BIND(LOOP); 5192 ldr(rscratch1, Address(post(ary1, wordSize))); 5193 tst(rscratch1, UPPER_BIT_MASK); 5194 br(NE, SET_RESULT); 5195 subs(len, len, wordSize); 5196 br(GE, LOOP); 5197 cmpw(len, -wordSize); 5198 br(EQ, DONE); 5199 5200 BIND(END); 5201 ldr(rscratch1, Address(ary1)); 5202 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5203 lslv(rscratch1, rscratch1, rscratch2); 5204 tst(rscratch1, UPPER_BIT_MASK); 5205 br(NE, SET_RESULT); 5206 b(DONE); 5207 5208 BIND(STUB); 5209 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5210 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5211 address tpc1 = trampoline_call(count_pos); 5212 if (tpc1 == nullptr) { 5213 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5214 postcond(pc() == badAddress); 5215 return nullptr; 5216 } 5217 b(DONE); 5218 5219 BIND(STUB_LONG); 5220 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5221 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5222 address tpc2 = trampoline_call(count_pos_long); 5223 if (tpc2 == nullptr) { 5224 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5225 postcond(pc() == badAddress); 5226 return nullptr; 5227 } 5228 b(DONE); 5229 5230 BIND(SET_RESULT); 5231 5232 add(len, len, wordSize); 5233 sub(result, result, len); 5234 5235 BIND(DONE); 5236 postcond(pc() != badAddress); 5237 return pc(); 5238 } 5239 5240 // Clobbers: rscratch1, rscratch2, rflags 5241 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5242 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5243 Register tmp4, Register tmp5, Register result, 5244 Register cnt1, int elem_size) { 5245 Label DONE, SAME; 5246 Register tmp1 = rscratch1; 5247 Register tmp2 = rscratch2; 5248 Register cnt2 = tmp2; // cnt2 only used in array length compare 5249 int elem_per_word = wordSize/elem_size; 5250 int log_elem_size = exact_log2(elem_size); 5251 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5252 int base_offset 5253 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5254 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5255 5256 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5257 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5258 5259 #ifndef PRODUCT 5260 { 5261 const char kind = (elem_size == 2) ? 'U' : 'L'; 5262 char comment[64]; 5263 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5264 BLOCK_COMMENT(comment); 5265 } 5266 #endif 5267 5268 // if (a1 == a2) 5269 // return true; 5270 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
br(EQ, SAME);

  if (UseSimpleArrayEquals) {
    Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
    // if (a1 == nullptr || a2 == nullptr)
    //     return false;
    // (a1 & a2) == 0 when at least one pointer is null, and only very
    // rarely (if ever) for two non-null pointers, so testing it first
    // saves a branch in the common non-null case.
    tst(a1, a2);
    mov(result, false);
    br(EQ, A_MIGHT_BE_NULL);
    // if (a1.length != a2.length)
    //     return false;
    bind(A_IS_NOT_NULL);
    ldrw(cnt1, Address(a1, length_offset));
    ldrw(cnt2, Address(a2, length_offset));
    eorw(tmp5, cnt1, cnt2);
    cbnzw(tmp5, DONE);
    lea(a1, Address(a1, base_offset));
    lea(a2, Address(a2, base_offset));
    // Check for short strings, i.e. smaller than wordSize.
    subs(cnt1, cnt1, elem_per_word);
    br(Assembler::LT, SHORT);
    // Main 8 byte comparison loop.
    bind(NEXT_WORD); {
      ldr(tmp1, Address(post(a1, wordSize)));
      ldr(tmp2, Address(post(a2, wordSize)));
      subs(cnt1, cnt1, elem_per_word);
      eor(tmp5, tmp1, tmp2);
      cbnz(tmp5, DONE);
    } br(GT, NEXT_WORD);
    // Last longword. In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.
    // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
    // length == 4.
    if (log_elem_size > 0)
      lsl(cnt1, cnt1, log_elem_size);
    ldr(tmp3, Address(a1, cnt1));
    ldr(tmp4, Address(a2, cnt1));
    eor(tmp5, tmp3, tmp4);
    cbnz(tmp5, DONE);
    b(SAME);
    bind(A_MIGHT_BE_NULL);
    // If both a1 and a2 are non-null, go back and compare them.
    cbz(a1, DONE);
    cbz(a2, DONE);
    b(A_IS_NOT_NULL);
    bind(SHORT);

    tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
    {
      ldrw(tmp1, Address(post(a1, 4)));
      ldrw(tmp2, Address(post(a2, 4)));
      eorw(tmp5, tmp1, tmp2);
      cbnzw(tmp5, DONE);
    }
    bind(TAIL03);
    tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
    {
      ldrh(tmp3, Address(post(a1, 2)));
      ldrh(tmp4, Address(post(a2, 2)));
      eorw(tmp5, tmp3, tmp4);
      cbnzw(tmp5, DONE);
    }
    bind(TAIL01);
    if (elem_size == 1) { // Only needed when comparing byte arrays.
      tbz(cnt1, 0, SAME); // 0-1 bytes left.
5340       {
5341         ldrb(tmp1, a1);
5342         ldrb(tmp2, a2);
5343         eorw(tmp5, tmp1, tmp2);
5344         cbnzw(tmp5, DONE);
5345       }
5346     }
5347   } else {
5348     Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
5349         CSET_EQ, LAST_CHECK;
5350     mov(result, false);
5351     cbz(a1, DONE);
5352     ldrw(cnt1, Address(a1, length_offset));
5353     cbz(a2, DONE);
5354     ldrw(cnt2, Address(a2, length_offset));
5355     // On most CPUs the ldrw of a2's length is, surprisingly, still
5356     // outstanding here, so it is faster to take another branch first
5357     // rather than compare the two lengths immediately.
5358     cmp(cnt1, (u1)elem_per_word);
5359     br(LE, SHORT); // short or same
5360     ldr(tmp3, Address(pre(a1, base_offset)));
5361     subs(zr, cnt1, stubBytesThreshold);
5362     br(GE, STUB);
5363     ldr(tmp4, Address(pre(a2, base_offset)));
5364     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
5365     cmp(cnt2, cnt1);
5366     br(NE, DONE);
5367
5368     // Main 16 byte comparison loop with 2 exits
5369     bind(NEXT_DWORD); {
5370       ldr(tmp1, Address(pre(a1, wordSize)));
5371       ldr(tmp2, Address(pre(a2, wordSize)));
5372       subs(cnt1, cnt1, 2 * elem_per_word);
5373       br(LE, TAIL);
5374       eor(tmp4, tmp3, tmp4);
5375       cbnz(tmp4, DONE);
5376       ldr(tmp3, Address(pre(a1, wordSize)));
5377       ldr(tmp4, Address(pre(a2, wordSize)));
5378       cmp(cnt1, (u1)elem_per_word);
5379       br(LE, TAIL2);
5380       cmp(tmp1, tmp2);
5381     } br(EQ, NEXT_DWORD);
5382     b(DONE);
5383
5384     bind(TAIL);
5385     eor(tmp4, tmp3, tmp4);
5386     eor(tmp2, tmp1, tmp2);
5387     lslv(tmp2, tmp2, tmp5);
5388     orr(tmp5, tmp4, tmp2);
5389     cmp(tmp5, zr);
5390     b(CSET_EQ);
5391
5392     bind(TAIL2);
5393     eor(tmp2, tmp1, tmp2);
5394     cbnz(tmp2, DONE);
5395     b(LAST_CHECK);
5396
5397     bind(STUB);
5398     ldr(tmp4, Address(pre(a2, base_offset)));
5399     cmp(cnt2, cnt1);
5400     br(NE, DONE);
5401     if (elem_size == 2) { // convert to byte counter
5402       lsl(cnt1, cnt1, 1);
5403     }
5404     eor(tmp5, tmp3, tmp4);
5405     cbnz(tmp5, DONE);
5406     RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
5407     assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
5408     address tpc = trampoline_call(stub);
5409     if (tpc == nullptr) {
5410       DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
5411       postcond(pc() == badAddress);
5412       return nullptr;
5413     }
5414     b(DONE);
5415
5416     // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
5417     // so, if a2 == null => return false(0), else return true, so we can return a2
5418     mov(result, a2);
5419     b(DONE);
5420     bind(SHORT);
5421     cmp(cnt2, cnt1);
5422     br(NE, DONE);
5423     cbz(cnt1, SAME);
5424     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
5425     ldr(tmp3, Address(a1, base_offset));
5426     ldr(tmp4, Address(a2, base_offset));
5427     bind(LAST_CHECK);
5428     eor(tmp4, tmp3, tmp4);
5429     lslv(tmp5, tmp4, tmp5);
5430     cmp(tmp5, zr);
5431     bind(CSET_EQ);
5432     cset(result, EQ);
5433     b(DONE);
5434   }
5435
5436   bind(SAME);
5437   mov(result, true);
5438   // That's it.
5439   bind(DONE);
5440
5441   BLOCK_COMMENT("} array_equals");
5442   postcond(pc() != badAddress);
5443   return pc();
5444 }
5445
5446 // Compare Strings
5447
5448 // For Strings we're passed the address of the first characters in a1
5449 // and a2 and the length in cnt1.
5450 // There are two implementations. For arrays >= 8 bytes, all
5451 // comparisons (including the final one, which may overlap) are
5452 // performed 8 bytes at a time. For strings < 8 bytes, we compare a
5453 // word, then a halfword, and then a byte, as the remaining length dictates.
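//
// In rough C terms, the strategy is (a sketch, not the exact generated
// code; ldN(p) here stands for an N-byte little-endian load):
//
//   bool equals(uint8_t* a1, uint8_t* a2, int cnt) {
//     if (cnt >= 8) {
//       for (; cnt > 8; a1 += 8, a2 += 8, cnt -= 8)
//         if (ld8(a1) != ld8(a2)) return false;
//       return ld8(a1 + cnt - 8) == ld8(a2 + cnt - 8);  // may overlap
//     }
//     if (cnt & 4) { if (ld4(a1) != ld4(a2)) return false; a1 += 4; a2 += 4; }
//     if (cnt & 2) { if (ld2(a1) != ld2(a2)) return false; a1 += 2; a2 += 2; }
//     if (cnt & 1) { if (*a1 != *a2) return false; }
//     return true;
//   }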
5453 5454 void MacroAssembler::string_equals(Register a1, Register a2, 5455 Register result, Register cnt1) 5456 { 5457 Label SAME, DONE, SHORT, NEXT_WORD; 5458 Register tmp1 = rscratch1; 5459 Register tmp2 = rscratch2; 5460 Register cnt2 = tmp2; // cnt2 only used in array length compare 5461 5462 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5463 5464 #ifndef PRODUCT 5465 { 5466 char comment[64]; 5467 snprintf(comment, sizeof comment, "{string_equalsL"); 5468 BLOCK_COMMENT(comment); 5469 } 5470 #endif 5471 5472 mov(result, false); 5473 5474 // Check for short strings, i.e. smaller than wordSize. 5475 subs(cnt1, cnt1, wordSize); 5476 br(Assembler::LT, SHORT); 5477 // Main 8 byte comparison loop. 5478 bind(NEXT_WORD); { 5479 ldr(tmp1, Address(post(a1, wordSize))); 5480 ldr(tmp2, Address(post(a2, wordSize))); 5481 subs(cnt1, cnt1, wordSize); 5482 eor(tmp1, tmp1, tmp2); 5483 cbnz(tmp1, DONE); 5484 } br(GT, NEXT_WORD); 5485 // Last longword. In the case where length == 4 we compare the 5486 // same longword twice, but that's still faster than another 5487 // conditional branch. 5488 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5489 // length == 4. 5490 ldr(tmp1, Address(a1, cnt1)); 5491 ldr(tmp2, Address(a2, cnt1)); 5492 eor(tmp2, tmp1, tmp2); 5493 cbnz(tmp2, DONE); 5494 b(SAME); 5495 5496 bind(SHORT); 5497 Label TAIL03, TAIL01; 5498 5499 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5500 { 5501 ldrw(tmp1, Address(post(a1, 4))); 5502 ldrw(tmp2, Address(post(a2, 4))); 5503 eorw(tmp1, tmp1, tmp2); 5504 cbnzw(tmp1, DONE); 5505 } 5506 bind(TAIL03); 5507 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5508 { 5509 ldrh(tmp1, Address(post(a1, 2))); 5510 ldrh(tmp2, Address(post(a2, 2))); 5511 eorw(tmp1, tmp1, tmp2); 5512 cbnzw(tmp1, DONE); 5513 } 5514 bind(TAIL01); 5515 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5516 { 5517 ldrb(tmp1, a1); 5518 ldrb(tmp2, a2); 5519 eorw(tmp1, tmp1, tmp2); 5520 cbnzw(tmp1, DONE); 5521 } 5522 // Arrays are equal. 5523 bind(SAME); 5524 mov(result, true); 5525 5526 // That's it. 5527 bind(DONE); 5528 BLOCK_COMMENT("} string_equals"); 5529 } 5530 5531 5532 // The size of the blocks erased by the zero_blocks stub. We must 5533 // handle anything smaller than this ourselves in zero_words(). 5534 const int MacroAssembler::zero_words_block_size = 8; 5535 5536 // zero_words() is used by C2 ClearArray patterns and by 5537 // C1_MacroAssembler. It is as small as possible, handling small word 5538 // counts locally and delegating anything larger to the zero_blocks 5539 // stub. It is expanded many times in compiled code, so it is 5540 // important to keep it short. 5541 5542 // ptr: Address of a buffer to be zeroed. 5543 // cnt: Count in HeapWords. 5544 // 5545 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5546 address MacroAssembler::zero_words(Register ptr, Register cnt) 5547 { 5548 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5549 5550 BLOCK_COMMENT("zero_words {"); 5551 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5552 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5553 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5554 5555 subs(rscratch1, cnt, zero_words_block_size); 5556 Label around; 5557 br(LO, around); 5558 { 5559 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5560 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5561 // Make sure this is a C2 compilation. 
C1 allocates space only for 5562 // trampoline stubs generated by Call LIR ops, and in any case it 5563 // makes sense for a C1 compilation task to proceed as quickly as 5564 // possible. 5565 CompileTask* task; 5566 if (StubRoutines::aarch64::complete() 5567 && Thread::current()->is_Compiler_thread() 5568 && (task = ciEnv::current()->task()) 5569 && is_c2_compile(task->comp_level())) { 5570 address tpc = trampoline_call(zero_blocks); 5571 if (tpc == nullptr) { 5572 DEBUG_ONLY(reset_labels(around)); 5573 return nullptr; 5574 } 5575 } else { 5576 far_call(zero_blocks); 5577 } 5578 } 5579 bind(around); 5580 5581 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5582 // for us. 5583 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5584 Label l; 5585 tbz(cnt, exact_log2(i), l); 5586 for (int j = 0; j < i; j += 2) { 5587 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5588 } 5589 bind(l); 5590 } 5591 { 5592 Label l; 5593 tbz(cnt, 0, l); 5594 str(zr, Address(ptr)); 5595 bind(l); 5596 } 5597 5598 BLOCK_COMMENT("} zero_words"); 5599 return pc(); 5600 } 5601 5602 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5603 // cnt: Immediate count in HeapWords. 5604 // 5605 // r10, r11, rscratch1, and rscratch2 are clobbered. 5606 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5607 { 5608 assert(wordSize <= BlockZeroingLowLimit, 5609 "increase BlockZeroingLowLimit"); 5610 address result = nullptr; 5611 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5612 #ifndef PRODUCT 5613 { 5614 char buf[64]; 5615 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5616 BLOCK_COMMENT(buf); 5617 } 5618 #endif 5619 if (cnt >= 16) { 5620 uint64_t loops = cnt/16; 5621 if (loops > 1) { 5622 mov(rscratch2, loops - 1); 5623 } 5624 { 5625 Label loop; 5626 bind(loop); 5627 for (int i = 0; i < 16; i += 2) { 5628 stp(zr, zr, Address(base, i * BytesPerWord)); 5629 } 5630 add(base, base, 16 * BytesPerWord); 5631 if (loops > 1) { 5632 subs(rscratch2, rscratch2, 1); 5633 br(GE, loop); 5634 } 5635 } 5636 } 5637 cnt %= 16; 5638 int i = cnt & 1; // store any odd word to start 5639 if (i) str(zr, Address(base)); 5640 for (; i < (int)cnt; i += 2) { 5641 stp(zr, zr, Address(base, i * wordSize)); 5642 } 5643 BLOCK_COMMENT("} zero_words"); 5644 result = pc(); 5645 } else { 5646 mov(r10, base); mov(r11, cnt); 5647 result = zero_words(r10, r11); 5648 } 5649 return result; 5650 } 5651 5652 // Zero blocks of memory by using DC ZVA. 5653 // 5654 // Aligns the base address first sufficiently for DC ZVA, then uses 5655 // DC ZVA repeatedly for every full block. cnt is the size to be 5656 // zeroed in HeapWords. Returns the count of words left to be zeroed 5657 // in cnt. 5658 // 5659 // NOTE: This is intended to be used in the zero_blocks() stub. If 5660 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 5661 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 5662 Register tmp = rscratch1; 5663 Register tmp2 = rscratch2; 5664 int zva_length = VM_Version::zva_length(); 5665 Label initial_table_end, loop_zva; 5666 Label fini; 5667 5668 // Base must be 16 byte aligned. If not just return and let caller handle it 5669 tst(base, 0x0f); 5670 br(Assembler::NE, fini); 5671 // Align base with ZVA length. 5672 neg(tmp, base); 5673 andr(tmp, tmp, zva_length - 1); 5674 5675 // tmp: the number of bytes to be filled to align the base with ZVA length. 
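  // The code below first advances base by tmp and drops tmp/8 words from
  // cnt, then branches into the table of stp instructions that follows:
  // each stp is one 4-byte instruction that zeroes 16 bytes, so landing
  // tmp/4 bytes before initial_table_end executes exactly the last
  // tmp/16 stores, which fill [base - tmp, base).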
5676   add(base, base, tmp);
5677   sub(cnt, cnt, tmp, Assembler::ASR, 3);
5678   adr(tmp2, initial_table_end);
5679   sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
5680   br(tmp2);
5681
5682   for (int i = -zva_length + 16; i < 0; i += 16)
5683     stp(zr, zr, Address(base, i));
5684   bind(initial_table_end);
5685
5686   sub(cnt, cnt, zva_length >> 3);
5687   bind(loop_zva);
5688   dc(Assembler::ZVA, base);
5689   subs(cnt, cnt, zva_length >> 3);
5690   add(base, base, zva_length);
5691   br(Assembler::GE, loop_zva);
5692   add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
5693   bind(fini);
5694 }
5695
5696 // base:  Address of a buffer to be filled, 8 bytes aligned.
5697 // cnt:   Count in 8-byte unit.
5698 // value: Value to be filled with.
5699 // base will point to the end of the buffer after filling.
5700 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
5701 {
5702 //  Algorithm:
5703 //
5704 //    if (cnt == 0) {
5705 //      return;
5706 //    }
5707 //    if ((p & 8) != 0) {
5708 //      *p++ = v;
5709 //    }
5710 //
5711 //    scratch1 = cnt & 14;
5712 //    cnt -= scratch1;
5713 //    p += scratch1;
5714 //    switch (scratch1 / 2) {
5715 //      do {
5716 //        cnt -= 16;
5717 //          p[-16] = v;
5718 //          p[-15] = v;
5719 //        case 7:
5720 //          p[-14] = v;
5721 //          p[-13] = v;
5722 //        case 6:
5723 //          p[-12] = v;
5724 //          p[-11] = v;
5725 //          // ...
5726 //        case 1:
5727 //          p[-2] = v;
5728 //          p[-1] = v;
5729 //        case 0:
5730 //          p += 16;
5731 //      } while (cnt);
5732 //    }
5733 //    if ((cnt & 1) == 1) {
5734 //      *p++ = v;
5735 //    }
5736
5737   assert_different_registers(base, cnt, value, rscratch1, rscratch2);
5738
5739   Label fini, skip, entry, loop;
5740   const int unroll = 8; // Number of stp instructions we'll unroll
5741
5742   cbz(cnt, fini);
5743   tbz(base, 3, skip);
5744   str(value, Address(post(base, 8)));
5745   sub(cnt, cnt, 1);
5746   bind(skip);
5747
5748   andr(rscratch1, cnt, (unroll-1) * 2);
5749   sub(cnt, cnt, rscratch1);
5750   add(base, base, rscratch1, Assembler::LSL, 3);
5751   adr(rscratch2, entry);
5752   sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
5753   br(rscratch2);
5754
5755   bind(loop);
5756   add(base, base, unroll * 16);
5757   for (int i = -unroll; i < 0; i++)
5758     stp(value, value, Address(base, i * 16));
5759   bind(entry);
5760   subs(cnt, cnt, unroll * 2);
5761   br(Assembler::GE, loop);
5762
5763   tbz(cnt, 0, fini);
5764   str(value, Address(post(base, 8)));
5765   bind(fini);
5766 }
5767
5768 // Intrinsic for
5769 //
5770 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
5771 //     return the number of characters copied.
5772 // - java/lang/StringUTF16.compress
5773 //     return index of non-latin1 character if copy fails, otherwise 'len'.
5774 //
5775 // This version always returns the number of characters copied, and does not
5776 // clobber the 'len' register. A successful copy will complete with the post-
5777 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
5778 // post-condition: 0 <= 'res' < 'len'.
5779 //
5780 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
5781 //       degrade performance unacceptably (on Ampere Altra, a Neoverse N1
5782 //       implementation), even though the footprint would be smaller.
5783 //       Using 'umaxv' in the ASCII case comes with a small penalty but does
5784 //       avoid additional bloat.
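//
// In rough C terms, the intrinsic implements (a sketch, not the exact
// generated code):
//
//   int encode(const jchar* src, jbyte* dst, int len, bool ascii) {
//     int limit = ascii ? 0x80 : 0x100;
//     for (int i = 0; i < len; i++) {
//       if (src[i] >= limit) return i;  // index of first bad character
//       dst[i] = (jbyte)src[i];
//     }
//     return len;
//   }
//
// with the main loops below handling 32 and 8 characters per iteration
// using SIMD.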
5785 // 5786 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 5787 void MacroAssembler::encode_iso_array(Register src, Register dst, 5788 Register len, Register res, bool ascii, 5789 FloatRegister vtmp0, FloatRegister vtmp1, 5790 FloatRegister vtmp2, FloatRegister vtmp3, 5791 FloatRegister vtmp4, FloatRegister vtmp5) 5792 { 5793 Register cnt = res; 5794 Register max = rscratch1; 5795 Register chk = rscratch2; 5796 5797 prfm(Address(src), PLDL1STRM); 5798 movw(cnt, len); 5799 5800 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 5801 5802 Label LOOP_32, DONE_32, FAIL_32; 5803 5804 BIND(LOOP_32); 5805 { 5806 cmpw(cnt, 32); 5807 br(LT, DONE_32); 5808 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 5809 // Extract lower bytes. 5810 FloatRegister vlo0 = vtmp4; 5811 FloatRegister vlo1 = vtmp5; 5812 uzp1(vlo0, T16B, vtmp0, vtmp1); 5813 uzp1(vlo1, T16B, vtmp2, vtmp3); 5814 // Merge bits... 5815 orr(vtmp0, T16B, vtmp0, vtmp1); 5816 orr(vtmp2, T16B, vtmp2, vtmp3); 5817 // Extract merged upper bytes. 5818 FloatRegister vhix = vtmp0; 5819 uzp2(vhix, T16B, vtmp0, vtmp2); 5820 // ISO-check on hi-parts (all zero). 5821 // ASCII-check on lo-parts (no sign). 5822 FloatRegister vlox = vtmp1; // Merge lower bytes. 5823 ASCII(orr(vlox, T16B, vlo0, vlo1)); 5824 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 5825 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 5826 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 5827 ASCII(orr(chk, chk, max)); 5828 cbnz(chk, FAIL_32); 5829 subw(cnt, cnt, 32); 5830 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 5831 b(LOOP_32); 5832 } 5833 BIND(FAIL_32); 5834 sub(src, src, 64); 5835 BIND(DONE_32); 5836 5837 Label LOOP_8, SKIP_8; 5838 5839 BIND(LOOP_8); 5840 { 5841 cmpw(cnt, 8); 5842 br(LT, SKIP_8); 5843 FloatRegister vhi = vtmp0; 5844 FloatRegister vlo = vtmp1; 5845 ld1(vtmp3, T8H, src); 5846 uzp1(vlo, T16B, vtmp3, vtmp3); 5847 uzp2(vhi, T16B, vtmp3, vtmp3); 5848 // ISO-check on hi-parts (all zero). 5849 // ASCII-check on lo-parts (no sign). 5850 ASCII(cm(LT, vtmp2, T16B, vlo)); 5851 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 5852 ASCII(umov(max, vtmp2, B, 0)); 5853 ASCII(orr(chk, chk, max)); 5854 cbnz(chk, SKIP_8); 5855 5856 strd(vlo, Address(post(dst, 8))); 5857 subw(cnt, cnt, 8); 5858 add(src, src, 16); 5859 b(LOOP_8); 5860 } 5861 BIND(SKIP_8); 5862 5863 #undef ASCII 5864 5865 Label LOOP, DONE; 5866 5867 cbz(cnt, DONE); 5868 BIND(LOOP); 5869 { 5870 Register chr = rscratch1; 5871 ldrh(chr, Address(post(src, 2))); 5872 tst(chr, ascii ? 0xff80 : 0xff00); 5873 br(NE, DONE); 5874 strb(chr, Address(post(dst, 1))); 5875 subs(cnt, cnt, 1); 5876 br(GT, LOOP); 5877 } 5878 BIND(DONE); 5879 // Return index where we stopped. 5880 subw(res, len, cnt); 5881 } 5882 5883 // Inflate byte[] array to char[]. 5884 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 5885 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 5886 FloatRegister vtmp1, FloatRegister vtmp2, 5887 FloatRegister vtmp3, Register tmp4) { 5888 Label big, done, after_init, to_stub; 5889 5890 assert_different_registers(src, dst, len, tmp4, rscratch1); 5891 5892 fmovd(vtmp1, 0.0); 5893 lsrw(tmp4, len, 3); 5894 bind(after_init); 5895 cbnzw(tmp4, big); 5896 // Short string: less than 8 bytes. 5897 { 5898 Label loop, tiny; 5899 5900 cmpw(len, 4); 5901 br(LT, tiny); 5902 // Use SIMD to do 4 bytes. 
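    // zip1 interleaves the four loaded bytes with the zeroes kept in
    // vtmp1, widening each byte into a 16-bit character with a zero
    // high byte.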
5903     ldrs(vtmp2, post(src, 4));
5904     zip1(vtmp3, T8B, vtmp2, vtmp1);
5905     subw(len, len, 4);
5906     strd(vtmp3, post(dst, 8));
5907
5908     cbzw(len, done);
5909
5910     // Do the remaining bytes one at a time.
5911     bind(loop);
5912     ldrb(tmp4, post(src, 1));
5913     strh(tmp4, post(dst, 2));
5914     subw(len, len, 1);
5915
5916     bind(tiny);
5917     cbnz(len, loop);
5918
5919     b(done);
5920   }
5921
5922   if (SoftwarePrefetchHintDistance >= 0) {
5923     bind(to_stub);
5924     RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
5925     assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
5926     address tpc = trampoline_call(stub);
5927     if (tpc == nullptr) {
5928       DEBUG_ONLY(reset_labels(big, done));
5929       postcond(pc() == badAddress);
5930       return nullptr;
5931     }
5932     b(after_init);
5933   }
5934
5935   // Unpack the bytes 8 at a time.
5936   bind(big);
5937   {
5938     Label loop, around, loop_last, loop_start;
5939
5940     if (SoftwarePrefetchHintDistance >= 0) {
5941       const int large_loop_threshold = (64 + 16)/8;
5942       ldrd(vtmp2, post(src, 8));
5943       andw(len, len, 7);
5944       cmp(tmp4, (u1)large_loop_threshold);
5945       br(GE, to_stub);
5946       b(loop_start);
5947
5948       bind(loop);
5949       ldrd(vtmp2, post(src, 8));
5950       bind(loop_start);
5951       subs(tmp4, tmp4, 1);
5952       br(EQ, loop_last);
5953       zip1(vtmp2, T16B, vtmp2, vtmp1);
5954       ldrd(vtmp3, post(src, 8));
5955       st1(vtmp2, T8H, post(dst, 16));
5956       subs(tmp4, tmp4, 1);
5957       zip1(vtmp3, T16B, vtmp3, vtmp1);
5958       st1(vtmp3, T8H, post(dst, 16));
5959       br(NE, loop);
5960       b(around);
5961       bind(loop_last);
5962       zip1(vtmp2, T16B, vtmp2, vtmp1);
5963       st1(vtmp2, T8H, post(dst, 16));
5964       bind(around);
5965       cbz(len, done);
5966     } else {
5967       andw(len, len, 7);
5968       bind(loop);
5969       ldrd(vtmp2, post(src, 8));
5970       sub(tmp4, tmp4, 1);
5971       zip1(vtmp3, T16B, vtmp2, vtmp1);
5972       st1(vtmp3, T8H, post(dst, 16));
5973       cbnz(tmp4, loop);
5974     }
5975   }
5976
5977   // Do the tail of up to 8 bytes.
5978   add(src, src, len);
5979   ldrd(vtmp3, Address(src, -8));
5980   add(dst, dst, len, ext::uxtw, 1);
5981   zip1(vtmp3, T16B, vtmp3, vtmp1);
5982   strq(vtmp3, Address(dst, -16));
5983
5984   bind(done);
5985   postcond(pc() != badAddress);
5986   return pc();
5987 }
5988
5989 // Compress char[] array to byte[].
5990 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
5991 // Returns the array length if every element in the array can be encoded;
5992 // otherwise, the index of the first non-latin1 (> 0xff) character.
5993 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
5994                                          Register res,
5995                                          FloatRegister tmp0, FloatRegister tmp1,
5996                                          FloatRegister tmp2, FloatRegister tmp3,
5997                                          FloatRegister tmp4, FloatRegister tmp5) {
5998   encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
5999 }
6000
6001 // java.lang.Math.round(double a)
6002 // Returns the closest long to the argument, with ties rounding to
6003 // positive infinity. This requires some fiddling for corner
6004 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
6005 void MacroAssembler::java_round_double(Register dst, FloatRegister src,
6006                                        FloatRegister ftmp) {
6007   Label DONE;
6008   BLOCK_COMMENT("java_round_double: { ");
6009   fmovd(rscratch1, src);
6010   // Use RoundToNearestTiesAway unless src small and -ve.
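  // For src >= 0 (and for huge |src|) fcvtas alone matches Math.round
  // and avoids the double rounding of the naive (jlong)(src + 0.5):
  // e.g. for src == 0x1.fffffffffffffp-2, the largest double below 0.5,
  // src + 0.5 rounds up to 1.0, so the naive form would yield 1 where
  // Math.round must return 0.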
6011 fcvtasd(dst, src); 6012 // Test if src >= 0 || abs(src) >= 0x1.0p52 6013 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6014 mov(rscratch2, julong_cast(0x1.0p52)); 6015 cmp(rscratch1, rscratch2); 6016 br(HS, DONE); { 6017 // src < 0 && abs(src) < 0x1.0p52 6018 // src may have a fractional part, so add 0.5 6019 fmovd(ftmp, 0.5); 6020 faddd(ftmp, src, ftmp); 6021 // Convert double to jlong, use RoundTowardsNegative 6022 fcvtmsd(dst, ftmp); 6023 } 6024 bind(DONE); 6025 BLOCK_COMMENT("} java_round_double"); 6026 } 6027 6028 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6029 FloatRegister ftmp) { 6030 Label DONE; 6031 BLOCK_COMMENT("java_round_float: { "); 6032 fmovs(rscratch1, src); 6033 // Use RoundToNearestTiesAway unless src small and -ve. 6034 fcvtassw(dst, src); 6035 // Test if src >= 0 || abs(src) >= 0x1.0p23 6036 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6037 mov(rscratch2, jint_cast(0x1.0p23f)); 6038 cmp(rscratch1, rscratch2); 6039 br(HS, DONE); { 6040 // src < 0 && |src| < 0x1.0p23 6041 // src may have a fractional part, so add 0.5 6042 fmovs(ftmp, 0.5f); 6043 fadds(ftmp, src, ftmp); 6044 // Convert float to jint, use RoundTowardsNegative 6045 fcvtmssw(dst, ftmp); 6046 } 6047 bind(DONE); 6048 BLOCK_COMMENT("} java_round_float"); 6049 } 6050 6051 // get_thread() can be called anywhere inside generated code so we 6052 // need to save whatever non-callee save context might get clobbered 6053 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6054 // the call setup code. 6055 // 6056 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6057 // On other systems, the helper is a usual C function. 6058 // 6059 void MacroAssembler::get_thread(Register dst) { 6060 RegSet saved_regs = 6061 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6062 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6063 6064 protect_return_address(); 6065 push(saved_regs, sp); 6066 6067 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6068 blr(lr); 6069 if (dst != c_rarg0) { 6070 mov(dst, c_rarg0); 6071 } 6072 6073 pop(saved_regs, sp); 6074 authenticate_return_address(); 6075 } 6076 6077 void MacroAssembler::cache_wb(Address line) { 6078 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6079 assert(line.index() == noreg, "index should be noreg"); 6080 assert(line.offset() == 0, "offset should be 0"); 6081 // would like to assert this 6082 // assert(line._ext.shift == 0, "shift should be zero"); 6083 if (VM_Version::supports_dcpop()) { 6084 // writeback using clear virtual address to point of persistence 6085 dc(Assembler::CVAP, line.base()); 6086 } else { 6087 // no need to generate anything as Unsafe.writebackMemory should 6088 // never invoke this stub 6089 } 6090 } 6091 6092 void MacroAssembler::cache_wbsync(bool is_pre) { 6093 // we only need a barrier post sync 6094 if (!is_pre) { 6095 membar(Assembler::AnyAny); 6096 } 6097 } 6098 6099 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6100 // Make sure that native code does not change SVE vector length. 
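  // sve_inc(tmp, B) adds the number of byte-sized elements in an SVE
  // vector, i.e. the current vector length in bytes, so after zeroing
  // tmp the subsw below compares that length against the value captured
  // at startup.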
6101 if (!UseSVE) return; 6102 Label verify_ok; 6103 movw(tmp, zr); 6104 sve_inc(tmp, B); 6105 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 6106 br(EQ, verify_ok); 6107 stop("Error: SVE vector length has changed since jvm startup"); 6108 bind(verify_ok); 6109 } 6110 6111 void MacroAssembler::verify_ptrue() { 6112 Label verify_ok; 6113 if (!UseSVE) { 6114 return; 6115 } 6116 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 6117 sve_dec(rscratch1, B); 6118 cbz(rscratch1, verify_ok); 6119 stop("Error: the preserved predicate register (p7) elements are not all true"); 6120 bind(verify_ok); 6121 } 6122 6123 void MacroAssembler::safepoint_isb() { 6124 isb(); 6125 #ifndef PRODUCT 6126 if (VerifyCrossModifyFence) { 6127 // Clear the thread state. 6128 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6129 } 6130 #endif 6131 } 6132 6133 #ifndef PRODUCT 6134 void MacroAssembler::verify_cross_modify_fence_not_required() { 6135 if (VerifyCrossModifyFence) { 6136 // Check if thread needs a cross modify fence. 6137 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 6138 Label fence_not_required; 6139 cbz(rscratch1, fence_not_required); 6140 // If it does then fail. 6141 lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)); 6142 mov(c_rarg0, rthread); 6143 blr(rscratch1); 6144 bind(fence_not_required); 6145 } 6146 } 6147 #endif 6148 6149 void MacroAssembler::spin_wait() { 6150 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 6151 switch (VM_Version::spin_wait_desc().inst()) { 6152 case SpinWait::NOP: 6153 nop(); 6154 break; 6155 case SpinWait::ISB: 6156 isb(); 6157 break; 6158 case SpinWait::YIELD: 6159 yield(); 6160 break; 6161 default: 6162 ShouldNotReachHere(); 6163 } 6164 } 6165 } 6166 6167 // Stack frame creation/removal 6168 6169 void MacroAssembler::enter(bool strip_ret_addr) { 6170 if (strip_ret_addr) { 6171 // Addresses can only be signed once. If there are multiple nested frames being created 6172 // in the same function, then the return address needs stripping first. 6173 strip_return_address(); 6174 } 6175 protect_return_address(); 6176 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6177 mov(rfp, sp); 6178 } 6179 6180 void MacroAssembler::leave() { 6181 mov(sp, rfp); 6182 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 6183 authenticate_return_address(); 6184 } 6185 6186 // ROP Protection 6187 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/ 6188 // destroying stack frames or whenever directly loading/storing the LR to memory. 6189 // If ROP protection is not set then these functions are no-ops. 6190 // For more details on PAC see pauth_aarch64.hpp. 6191 6192 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory. 6193 // Uses value zero as the modifier. 6194 // 6195 void MacroAssembler::protect_return_address() { 6196 if (VM_Version::use_rop_protection()) { 6197 check_return_address(); 6198 paciaz(); 6199 } 6200 } 6201 6202 // Sign the return value in the given register. Use before updating the LR in the existing stack 6203 // frame for the current function. 6204 // Uses value zero as the modifier. 6205 // 6206 void MacroAssembler::protect_return_address(Register return_reg) { 6207 if (VM_Version::use_rop_protection()) { 6208 check_return_address(return_reg); 6209 paciza(return_reg); 6210 } 6211 } 6212 6213 // Authenticate the LR. 
Use before function return, after restoring FP and loading LR from memory.
6214 // Uses value zero as the modifier.
6215 //
6216 void MacroAssembler::authenticate_return_address() {
6217   if (VM_Version::use_rop_protection()) {
6218     autiaz();
6219     check_return_address();
6220   }
6221 }
6222
6223 // Authenticate the return address value in the given register. Use before updating the LR in the
6224 // existing stack frame for the current function.
6225 // Uses value zero as the modifier.
6226 //
6227 void MacroAssembler::authenticate_return_address(Register return_reg) {
6228   if (VM_Version::use_rop_protection()) {
6229     autiza(return_reg);
6230     check_return_address(return_reg);
6231   }
6232 }
6233
6234 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
6235 // there is no guaranteed way of authenticating the LR.
6236 //
6237 void MacroAssembler::strip_return_address() {
6238   if (VM_Version::use_rop_protection()) {
6239     xpaclri();
6240   }
6241 }
6242
6243 #ifndef PRODUCT
6244 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
6245 // occur when the pointer is used - i.e. when the program returns to the invalid LR. At this point
6246 // it is difficult to debug back to the callee function.
6247 // This function simply loads from the address in the given register.
6248 // Use directly after authentication to catch authentication failures.
6249 // Also use before signing to check that the pointer is valid and hasn't already been signed.
6250 //
6251 void MacroAssembler::check_return_address(Register return_reg) {
6252   if (VM_Version::use_rop_protection()) {
6253     ldr(zr, Address(return_reg));
6254   }
6255 }
6256 #endif
6257
6258 // The java_calling_convention describes stack locations as ideal slots on
6259 // a frame with no ABI restrictions. Since we must observe ABI restrictions
6260 // (such as the space reserved for the saved FP and LR), the slots must be
6261 // biased by the following value.
6262 static int reg2offset_in(VMReg r) {
6263   // Account for saved rfp and lr
6264   // This should really be in_preserve_stack_slots
6265   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
6266 }
6267
6268 static int reg2offset_out(VMReg r) {
6269   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
6270 }
6271
6272 // On 64-bit we will store integer-like items to the stack as 64-bit items
6273 // (AArch64 ABI), even though Java would only store 32 bits for a parameter.
6274 // On 32-bit it would simply be 32 bits. So this routine does 32->32 on
6275 // 32-bit and 32->64 on 64-bit.
6276 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
6277   if (src.first()->is_stack()) {
6278     if (dst.first()->is_stack()) {
6279       // stack to stack
6280       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6281       str(tmp, Address(sp, reg2offset_out(dst.first())));
6282     } else {
6283       // stack to reg
6284       ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
6285     }
6286   } else if (dst.first()->is_stack()) {
6287     // reg to stack
6288     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
6289   } else {
6290     if (dst.first() != src.first()) {
6291       sxtw(dst.first()->as_Register(), src.first()->as_Register());
6292     }
6293   }
6294 }
6295
6296 // An oop arg. Must pass a handle, not the oop itself.
6297 void MacroAssembler::object_move(
6298     OopMap* map,
6299     int oop_handle_offset,
6300     int framesize_in_slots,
6301     VMRegPair src,
6302     VMRegPair dst,
6303     bool is_receiver,
6304     int* receiver_offset) {
6305
6306   // Must pass a handle. First figure out the location we use as a handle.
6307
6308   Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
6309
6310   // See if the oop is null; if it is, we need no handle.
6311
6312   if (src.first()->is_stack()) {
6313
6314     // Oop is already on the stack as an argument
6315     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
6316     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
6317     if (is_receiver) {
6318       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
6319     }
6320
6321     ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
6322     lea(rHandle, Address(rfp, reg2offset_in(src.first())));
6323     // conditionally move a null
6324     cmp(rscratch1, zr);
6325     csel(rHandle, zr, rHandle, Assembler::EQ);
6326   } else {
6327
6328     // Oop is in a register; we must store it to the space we reserve
6329     // on the stack for oop handles and pass a handle if the oop is non-null.
6330
6331     const Register rOop = src.first()->as_Register();
6332     int oop_slot;
6333     if (rOop == j_rarg0)
6334       oop_slot = 0;
6335     else if (rOop == j_rarg1)
6336       oop_slot = 1;
6337     else if (rOop == j_rarg2)
6338       oop_slot = 2;
6339     else if (rOop == j_rarg3)
6340       oop_slot = 3;
6341     else if (rOop == j_rarg4)
6342       oop_slot = 4;
6343     else if (rOop == j_rarg5)
6344       oop_slot = 5;
6345     else if (rOop == j_rarg6)
6346       oop_slot = 6;
6347     else {
6348       assert(rOop == j_rarg7, "wrong register");
6349       oop_slot = 7;
6350     }
6351
6352     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
6353     int offset = oop_slot*VMRegImpl::stack_slot_size;
6354
6355     map->set_oop(VMRegImpl::stack2reg(oop_slot));
6356     // Store oop in handle area, may be null
6357     str(rOop, Address(sp, offset));
6358     if (is_receiver) {
6359       *receiver_offset = offset;
6360     }
6361
6362     cmp(rOop, zr);
6363     lea(rHandle, Address(sp, offset));
6364     // conditionally move a null
6365     csel(rHandle, zr, rHandle, Assembler::EQ);
6366   }
6367
6368   // If the arg goes to the stack, store it there; otherwise it is already in the correct register.
6369   if (dst.first()->is_stack()) {
6370     str(rHandle, Address(sp, reg2offset_out(dst.first())));
6371   }
6372 }
6373
6374 // A float arg may have to do a float reg to int reg conversion
6375 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
6376   if (src.first()->is_stack()) {
6377     if (dst.first()->is_stack()) {
6378       ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
6379       strw(tmp, Address(sp, reg2offset_out(dst.first())));
6380     } else {
6381       ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
6382     }
6383   } else if (src.first() != dst.first()) {
6384     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
6385       fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
6386     else
6387       strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
6388   }
6389 }
6390
6391 // A long move
6392 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
6393   if (src.first()->is_stack()) {
6394     if (dst.first()->is_stack()) {
6395       // stack to stack
6396       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6397       str(tmp, Address(sp, reg2offset_out(dst.first())));
6398     } else {
6399       // stack to reg
6400       ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
6401     }
6402   } else if (dst.first()->is_stack()) {
6403     // reg to stack
6404     // Do we really have to sign extend???
6405     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
6406     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
6407   } else {
6408     if (dst.first() != src.first()) {
6409       mov(dst.first()->as_Register(), src.first()->as_Register());
6410     }
6411   }
6412 }
6413
6414
6415 // A double move
6416 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
6417   if (src.first()->is_stack()) {
6418     if (dst.first()->is_stack()) {
6419       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
6420       str(tmp, Address(sp, reg2offset_out(dst.first())));
6421     } else {
6422       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
6423     }
6424   } else if (src.first() != dst.first()) {
6425     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
6426       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
6427     else
6428       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
6429   }
6430 }
6431
6432 // Implements lightweight-locking.
6433 //
6434 // - obj: the object to be locked
6435 // - t1, t2, t3: temporary registers, will be destroyed
6436 // - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
6437 void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
6438   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6439   assert_different_registers(obj, t1, t2, t3, rscratch1);
6440
6441   Label push;
6442   const Register top = t1;
6443   const Register mark = t2;
6444   const Register t = t3;
6445
6446   // Preload the markWord. It is important that this is the first
6447   // instruction emitted as it is part of C1's null check semantics.
6448   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
6449
6450   // Check if the lock-stack is full.
6451   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6452   cmpw(top, (unsigned)LockStack::end_offset());
6453   br(Assembler::GE, slow);
6454
6455   // Check for recursion.
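  // Recursion is detected purely from the lock-stack: if the most
  // recently pushed entry is obj again, this thread already owns the
  // lock and may push obj once more without touching the mark word.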
6456   subw(t, top, oopSize);
6457   ldr(t, Address(rthread, t));
6458   cmp(obj, t);
6459   br(Assembler::EQ, push);
6460
6461   // Check header for monitor (0b10).
6462   tst(mark, markWord::monitor_value);
6463   br(Assembler::NE, slow);
6464
6465   // Try to lock. Transition lock bits 0b01 => 0b00
6466   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
6467   orr(mark, mark, markWord::unlocked_value);
6468   eor(t, mark, markWord::unlocked_value);
6469   cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
6470           /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
6471   br(Assembler::NE, slow);
6472
6473   bind(push);
6474   // After successful lock, push object on lock-stack.
6475   str(obj, Address(rthread, top));
6476   addw(top, top, oopSize);
6477   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6478 }
6479
6480 // Implements lightweight-unlocking.
6481 //
6482 // - obj: the object to be unlocked
6483 // - t1, t2, t3: temporary registers
6484 // - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
6485 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
6486   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
6487   // cmpxchg clobbers rscratch1.
6488   assert_different_registers(obj, t1, t2, t3, rscratch1);
6489
6490 #ifdef ASSERT
6491   {
6492     // Check for lock-stack underflow.
6493     Label stack_ok;
6494     ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
6495     cmpw(t1, (unsigned)LockStack::start_offset());
6496     br(Assembler::GE, stack_ok);
6497     STOP("Lock-stack underflow");
6498     bind(stack_ok);
6499   }
6500 #endif
6501
6502   Label unlocked, push_and_slow;
6503   const Register top = t1;
6504   const Register mark = t2;
6505   const Register t = t3;
6506
6507   // Check if obj is top of lock-stack.
6508   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6509   subw(top, top, oopSize);
6510   ldr(t, Address(rthread, top));
6511   cmp(obj, t);
6512   br(Assembler::NE, slow);
6513
6514   // Pop lock-stack.
6515   DEBUG_ONLY(str(zr, Address(rthread, top));)
6516   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6517
6518   // Check if recursive.
6519   subw(t, top, oopSize);
6520   ldr(t, Address(rthread, t));
6521   cmp(obj, t);
6522   br(Assembler::EQ, unlocked);
6523
6524   // Not recursive. Check header for monitor (0b10).
6525   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
6526   tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
6527
6528 #ifdef ASSERT
6529   // Check header not unlocked (0b01).
6530   Label not_unlocked;
6531   tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
6532   stop("lightweight_unlock already unlocked");
6533   bind(not_unlocked);
6534 #endif
6535
6536   // Try to unlock. Transition lock bits 0b00 => 0b01
6537   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
6538   orr(t, mark, markWord::unlocked_value);
6539   cmpxchg(obj, mark, t, Assembler::xword,
6540           /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
6541   br(Assembler::EQ, unlocked);
6542
6543   bind(push_and_slow);
6544   // Restore lock-stack and handle the unlock in runtime.
6545   DEBUG_ONLY(str(obj, Address(rthread, top));)
6546   addw(top, top, oopSize);
6547   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
6548   b(slow);
6549
6550   bind(unlocked);
6551 }