/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInlineKlass.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm19
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//       strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//       strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
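
// Worked illustration of group 3a (values invented for this sketch, not
// taken from the original sources). Resolving an adrp/add pair by hand,
// with pc == 0x0000_ffff_8000_0040 and target == 0x0000_ffff_8123_4567:
//
//   adrp x8, target        ; imm21 = (target >> 12) - (pc >> 12) = 0x1234
//   add  x8, x8, #0x567    ; imm12 = target & 0xfff
//
// The Decoder below recovers the target as
//   ((pc + (imm21 << 12)) & ~0xfffull) + imm12
// and the Patcher performs the inverse, rewriting imm21 and imm12.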
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == 0, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};
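
// A sketch of the dispatch above (illustrative encodings, not from this
// file): "b/bl <imm26>" encodes opcode bits x00101, so insn[30:25] is
// 0b001010 or 0b001011 and run() takes the unconditionalBranch case;
// a 64-bit "movz Rx, #imm16" encodes as 0b1_10_100101..., so
// insn[30:25] is 0b101001 and run() takes the move-wide immediate case.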

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};
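
// Worked example for Patcher::immediate() (illustrative value, not from
// the original sources): patching the move-wide sequence to
// target == 0x0000_7fee_dead_beef rewrites
//   movz Rx, #0xbeef            ; bits 15..0
//   movk Rx, #0xdead, lsl #16   ; bits 31..16
//   movk Rx, #0x7fee, lsl #32   ; bits 47..32
// i.e. each 16-bit slice of the address is stored into insn[20:5] of the
// corresponding instruction; 48 bits are enough for current address spaces.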

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Decoder::adrpMovk_impl; }

public:
  Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}
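
// The Decoder and the Patcher are exact inverses: for any supported
// sequence, decoding the target and patching it back must be the
// identity. A sketch of that round trip (hypothetical call site, not
// from this file):
//
//   address target = MacroAssembler::target_addr_for_insn(insn_addr);
//   MacroAssembler::pd_patch_instruction_size(insn_addr, target);
//   // Patcher::verify() then re-decodes and asserts the same target.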

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}
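
// Example of the narrow encoding used by patch_oop()/patch_narrow_klass()
// above (illustrative value): for a compressed oop n == 0x0a0b_c0de the
// two instructions are patched to
//   movz Rx, #0x0a0b, lsl #16   ; n >> 16
//   movk Rx, #0xc0de            ; n & 0xffff
// i.e. the first (movz) instruction carries the *upper* half. Its
// opcode bits 0b11010010101 (movz with hw == 01) are what distinguish
// this form from the 48-bit movz/movk/movk sequence used for wide oops.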

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}
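
// For illustration (hypothetical code cache sizes): with a reserved
// code cache well under 128M every target is reachable by a single
// "b/bl" (+/-128M branch range), so far_call() emits one instruction;
// with a much larger cache the same call becomes
//   adrp tmp, target_page ; add tmp, tmp, #page_offset ; blr tmp
// which is why the far variants need a scratch register.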

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_compiled(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}
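
// Alignment sketch for the ic_check() below (hypothetical numbers):
// with end_alignment == 32, offset() == 0x104 and a near ic_miss stub
// (ic_check_size() == 5 insns == 20 bytes), align(32, 0x104 + 20) pads
// 0x118 up to 0x120 with two nops, so the UEP starts at 0x10c and the
// verified entry point lands 32-byte aligned at 0x120, right after the
// inline cache check.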

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at target bytes offset from the current offset() is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_inline_type(inline_klass, temp_reg, done_check);
    stop("get_default_value_oop from non inline type klass");
    bind(done_check);
  }
#endif
  Register offset = temp_reg;
  // Getting the offset of the pre-allocated default value
  ldr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
  ldr(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));

  // Getting the mirror
  ldr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
  resolve_oop_handle(obj, inline_klass, temp_reg);

  // Getting the pre-allocated default value from the mirror
  Address field(obj, offset);
  load_heap_oop(obj, field, inline_klass, rscratch2);
}

void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
    stop("get_empty_value from non-empty inline klass");
    bind(done_check);
  }
#endif
  get_default_value_oop(inline_klass, temp_reg, obj);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}
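
// Sketch of the data being scanned above (layout as this code uses it,
// example names invented): for a receiver klass implementing I0, I1, ...
//
//   recv_klass + vtable_start + vtable_len*8 -> itableOffsetEntry { I0, off0 }
//                                               itableOffsetEntry { I1, off1 }
//                                               ...
//   recv_klass + offK                        -> itableMethodEntry[] for IK
//
// so on a hit the code loads offK from the matching entry and indexes
// it from recv_klass, which was pre-adjusted by the scaled itable_index.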

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
      - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}
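
// Illustration of the fast path above (hypothetical classes): for
// "class B extends A", A sits in B's primary-super display, so
// super_check_offset points at the display slot and one ldr + cmp
// decides the test. For "class B implements I", I can only live among
// B's secondary supers, so super_check_offset equals sc_offset and the
// compare against the one-element super_cache either hits (fast
// success) or falls through to check_klass_subtype_slow_path().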
1503 if (L_failure == &L_fallthrough) { 1504 br(Assembler::EQ, *L_success); 1505 } else { 1506 br(Assembler::NE, *L_failure); 1507 final_jmp(*L_success); 1508 } 1509 } 1510 1511 bind(L_fallthrough); 1512 1513 #undef final_jmp 1514 } 1515 1516 // These two are taken from x86, but they look generally useful 1517 1518 // scans count pointer sized words at [addr] for occurrence of value, 1519 // generic 1520 void MacroAssembler::repne_scan(Register addr, Register value, Register count, 1521 Register scratch) { 1522 Label Lloop, Lexit; 1523 cbz(count, Lexit); 1524 bind(Lloop); 1525 ldr(scratch, post(addr, wordSize)); 1526 cmp(value, scratch); 1527 br(EQ, Lexit); 1528 sub(count, count, 1); 1529 cbnz(count, Lloop); 1530 bind(Lexit); 1531 } 1532 1533 // scans count 4 byte words at [addr] for occurrence of value, 1534 // generic 1535 void MacroAssembler::repne_scanw(Register addr, Register value, Register count, 1536 Register scratch) { 1537 Label Lloop, Lexit; 1538 cbz(count, Lexit); 1539 bind(Lloop); 1540 ldrw(scratch, post(addr, wordSize)); 1541 cmpw(value, scratch); 1542 br(EQ, Lexit); 1543 sub(count, count, 1); 1544 cbnz(count, Lloop); 1545 bind(Lexit); 1546 } 1547 1548 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 1549 Register super_klass, 1550 Register temp_reg, 1551 Register temp2_reg, 1552 Label* L_success, 1553 Label* L_failure, 1554 bool set_cond_codes) { 1555 assert_different_registers(sub_klass, super_klass, temp_reg); 1556 if (temp2_reg != noreg) 1557 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1558 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1559 1560 Label L_fallthrough; 1561 int label_nulls = 0; 1562 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1563 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1564 assert(label_nulls <= 1, "at most one null in the batch"); 1565 1566 // a couple of useful fields in sub_klass: 1567 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1568 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1569 Address secondary_supers_addr(sub_klass, ss_offset); 1570 Address super_cache_addr( sub_klass, sc_offset); 1571 1572 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1573 1574 // Do a linear scan of the secondary super-klass chain. 1575 // This code is rarely used, so simplicity is a virtue here. 1576 // The repne_scan instruction uses fixed registers, which we must spill. 1577 // Don't worry too much about pre-existing connections with the input regs. 1578 1579 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1580 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1581 1582 RegSet pushed_registers; 1583 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1584 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1585 1586 if (super_klass != r0) { 1587 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1588 } 1589 1590 push(pushed_registers, sp); 1591 1592 // Get super_klass value into r0 (even if it was in r5 or r2). 1593 if (super_klass != r0) { 1594 mov(r0, super_klass); 1595 } 1596 1597 #ifndef PRODUCT 1598 mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr); 1599 Address pst_counter_addr(rscratch2); 1600 ldr(rscratch1, pst_counter_addr); 1601 add(rscratch1, rscratch1, 1); 1602 str(rscratch1, pst_counter_addr); 1603 #endif //PRODUCT 1604 1605 // We will consult the secondary-super array. 1606 ldr(r5, secondary_supers_addr); 1607 // Load the array length. 
1608 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1609 // Skip to start of data. 1610 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1611 1612 cmp(sp, zr); // Clear Z flag; SP is never zero 1613 // Scan R2 words at [R5] for an occurrence of R0. 1614 // Set NZ/Z based on last compare. 1615 repne_scan(r5, r0, r2, rscratch1); 1616 1617 // Unspill the temp. registers: 1618 pop(pushed_registers, sp); 1619 1620 br(Assembler::NE, *L_failure); 1621 1622 // Success. Cache the super we found and proceed in triumph. 1623 str(super_klass, super_cache_addr); 1624 1625 if (L_success != &L_fallthrough) { 1626 b(*L_success); 1627 } 1628 1629 #undef IS_A_TEMP 1630 1631 bind(L_fallthrough); 1632 } 1633 1634 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1635 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1636 assert_different_registers(klass, rthread, scratch); 1637 1638 Label L_fallthrough, L_tmp; 1639 if (L_fast_path == nullptr) { 1640 L_fast_path = &L_fallthrough; 1641 } else if (L_slow_path == nullptr) { 1642 L_slow_path = &L_fallthrough; 1643 } 1644 // Fast path check: class is fully initialized 1645 ldrb(scratch, Address(klass, InstanceKlass::init_state_offset())); 1646 subs(zr, scratch, InstanceKlass::fully_initialized); 1647 br(Assembler::EQ, *L_fast_path); 1648 1649 // Fast path check: current thread is initializer thread 1650 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1651 cmp(rthread, scratch); 1652 1653 if (L_slow_path == &L_fallthrough) { 1654 br(Assembler::EQ, *L_fast_path); 1655 bind(*L_slow_path); 1656 } else if (L_fast_path == &L_fallthrough) { 1657 br(Assembler::NE, *L_slow_path); 1658 bind(*L_fast_path); 1659 } else { 1660 Unimplemented(); 1661 } 1662 } 1663 1664 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1665 if (!VerifyOops || VerifyAdapterSharing) { 1666 // Below address of the code string confuses VerifyAdapterSharing 1667 // because it may differ between otherwise equivalent adapters. 1668 return; 1669 } 1670 1671 // Pass register number to verify_oop_subroutine 1672 const char* b = nullptr; 1673 { 1674 ResourceMark rm; 1675 stringStream ss; 1676 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1677 b = code_string(ss.as_string()); 1678 } 1679 BLOCK_COMMENT("verify_oop {"); 1680 1681 strip_return_address(); // This might happen within a stack frame. 1682 protect_return_address(); 1683 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1684 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1685 1686 mov(r0, reg); 1687 movptr(rscratch1, (uintptr_t)(address)b); 1688 1689 // call indirectly to solve generation ordering problem 1690 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1691 ldr(rscratch2, Address(rscratch2)); 1692 blr(rscratch2); 1693 1694 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1695 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1696 authenticate_return_address(); 1697 1698 BLOCK_COMMENT("} verify_oop"); 1699 } 1700 1701 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1702 if (!VerifyOops || VerifyAdapterSharing) { 1703 // Below address of the code string confuses VerifyAdapterSharing 1704 // because it may differ between otherwise equivalent adapters. 
1705 return; 1706 } 1707 1708 const char* b = nullptr; 1709 { 1710 ResourceMark rm; 1711 stringStream ss; 1712 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1713 b = code_string(ss.as_string()); 1714 } 1715 BLOCK_COMMENT("verify_oop_addr {"); 1716 1717 strip_return_address(); // This might happen within a stack frame. 1718 protect_return_address(); 1719 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1720 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1721 1722 // addr may contain sp so we will have to adjust it based on the 1723 // pushes that we just did. 1724 if (addr.uses(sp)) { 1725 lea(r0, addr); 1726 ldr(r0, Address(r0, 4 * wordSize)); 1727 } else { 1728 ldr(r0, addr); 1729 } 1730 movptr(rscratch1, (uintptr_t)(address)b); 1731 1732 // call indirectly to solve generation ordering problem 1733 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1734 ldr(rscratch2, Address(rscratch2)); 1735 blr(rscratch2); 1736 1737 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1738 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1739 authenticate_return_address(); 1740 1741 BLOCK_COMMENT("} verify_oop_addr"); 1742 } 1743 1744 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1745 int extra_slot_offset) { 1746 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 1747 int stackElementSize = Interpreter::stackElementSize; 1748 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1749 #ifdef ASSERT 1750 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1751 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1752 #endif 1753 if (arg_slot.is_constant()) { 1754 return Address(esp, arg_slot.as_constant() * stackElementSize 1755 + offset); 1756 } else { 1757 add(rscratch1, esp, arg_slot.as_register(), 1758 ext::uxtx, exact_log2(stackElementSize)); 1759 return Address(rscratch1, offset); 1760 } 1761 } 1762 1763 void MacroAssembler::call_VM_leaf_base(address entry_point, 1764 int number_of_arguments, 1765 Label *retaddr) { 1766 Label E, L; 1767 1768 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1769 1770 mov(rscratch1, entry_point); 1771 blr(rscratch1); 1772 if (retaddr) 1773 bind(*retaddr); 1774 1775 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1776 } 1777 1778 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1779 call_VM_leaf_base(entry_point, number_of_arguments); 1780 } 1781 1782 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1783 pass_arg0(this, arg_0); 1784 call_VM_leaf_base(entry_point, 1); 1785 } 1786 1787 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1788 assert_different_registers(arg_1, c_rarg0); 1789 pass_arg0(this, arg_0); 1790 pass_arg1(this, arg_1); 1791 call_VM_leaf_base(entry_point, 2); 1792 } 1793 1794 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 1795 Register arg_1, Register arg_2) { 1796 assert_different_registers(arg_1, c_rarg0); 1797 assert_different_registers(arg_2, c_rarg0, c_rarg1); 1798 pass_arg0(this, arg_0); 1799 pass_arg1(this, arg_1); 1800 pass_arg2(this, arg_2); 1801 call_VM_leaf_base(entry_point, 3); 1802 } 1803 1804 void MacroAssembler::super_call_VM_leaf(address entry_point) { 1805 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1806 } 1807 1808 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1809 pass_arg0(this, arg_0); 1810 
MacroAssembler::call_VM_leaf_base(entry_point, 1); 1811 } 1812 1813 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1814 1815 assert_different_registers(arg_0, c_rarg1); 1816 pass_arg1(this, arg_1); 1817 pass_arg0(this, arg_0); 1818 MacroAssembler::call_VM_leaf_base(entry_point, 2); 1819 } 1820 1821 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1822 assert_different_registers(arg_0, c_rarg1, c_rarg2); 1823 assert_different_registers(arg_1, c_rarg2); 1824 pass_arg2(this, arg_2); 1825 pass_arg1(this, arg_1); 1826 pass_arg0(this, arg_0); 1827 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1828 } 1829 1830 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1831 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 1832 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1833 assert_different_registers(arg_2, c_rarg3); 1834 pass_arg3(this, arg_3); 1835 pass_arg2(this, arg_2); 1836 pass_arg1(this, arg_1); 1837 pass_arg0(this, arg_0); 1838 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1839 } 1840 1841 void MacroAssembler::null_check(Register reg, int offset) { 1842 if (needs_explicit_null_check(offset)) { 1843 // provoke OS null exception if reg is null by 1844 // accessing M[reg] w/o changing any registers 1845 // NOTE: this is plenty to provoke a segv 1846 ldr(zr, Address(reg)); 1847 } else { 1848 // nothing to do, (later) access of M[reg + offset] 1849 // will provoke OS null exception if reg is null 1850 } 1851 } 1852 1853 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) { 1854 assert_different_registers(markword, rscratch2); 1855 andr(markword, markword, markWord::inline_type_mask_in_place); 1856 mov(rscratch2, markWord::inline_type_pattern); 1857 cmp(markword, rscratch2); 1858 br(Assembler::EQ, is_inline_type); 1859 } 1860 1861 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) { 1862 ldrw(temp_reg, Address(klass, Klass::access_flags_offset())); 1863 andr(temp_reg, temp_reg, JVM_ACC_IDENTITY); 1864 cbz(temp_reg, is_inline_type); 1865 } 1866 1867 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) { 1868 assert_different_registers(tmp, rscratch1); 1869 cbz(object, not_inline_type); 1870 const int is_inline_type_mask = markWord::inline_type_pattern; 1871 ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes())); 1872 mov(rscratch1, is_inline_type_mask); 1873 andr(tmp, tmp, rscratch1); 1874 cmp(tmp, rscratch1); 1875 br(Assembler::NE, not_inline_type); 1876 } 1877 1878 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) { 1879 #ifdef ASSERT 1880 { 1881 Label done_check; 1882 test_klass_is_inline_type(klass, temp_reg, done_check); 1883 stop("test_klass_is_empty_inline_type with non inline type klass"); 1884 bind(done_check); 1885 } 1886 #endif 1887 ldrw(temp_reg, Address(klass, InstanceKlass::misc_flags_offset())); 1888 andr(temp_reg, temp_reg, InstanceKlassFlags::is_empty_inline_type_value()); 1889 cbnz(temp_reg, is_empty_inline_type); 1890 } 1891 1892 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) { 1893 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 1894 tbnz(flags, 
ResolvedFieldEntry::is_null_free_inline_type_shift, is_null_free_inline_type); 1895 } 1896 1897 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) { 1898 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 1899 tbz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, not_null_free_inline_type); 1900 } 1901 1902 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) { 1903 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 1904 tbnz(flags, ResolvedFieldEntry::is_flat_shift, is_flat); 1905 } 1906 1907 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) { 1908 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 1909 tbnz(flags, ResolvedFieldEntry::has_null_marker_shift, has_null_marker); 1910 } 1911 1912 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) { 1913 Label test_mark_word; 1914 // load mark word 1915 ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes())); 1916 // check displaced 1917 tst(temp_reg, markWord::unlocked_value); 1918 br(Assembler::NE, test_mark_word); 1919 // slow path: use klass prototype 1920 load_prototype_header(temp_reg, oop); 1921 1922 bind(test_mark_word); 1923 andr(temp_reg, temp_reg, test_bit); 1924 if (jmp_set) { 1925 cbnz(temp_reg, jmp_label); 1926 } else { 1927 cbz(temp_reg, jmp_label); 1928 } 1929 } 1930 1931 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) { 1932 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array); 1933 } 1934 1935 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg, 1936 Label& is_non_flat_array) { 1937 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array); 1938 } 1939 1940 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) { 1941 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array); 1942 } 1943 1944 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) { 1945 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array); 1946 } 1947 1948 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) { 1949 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 1950 br(Assembler::NE, is_flat_array); 1951 } 1952 1953 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) { 1954 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 1955 br(Assembler::EQ, is_non_flat_array); 1956 } 1957 1958 // MacroAssembler protected routines needed to implement 1959 // public methods 1960 1961 void MacroAssembler::mov(Register r, Address dest) { 1962 code_section()->relocate(pc(), dest.rspec()); 1963 uint64_t imm64 = (uint64_t)dest.target(); 1964 movptr(r, imm64); 1965 } 1966 1967 // Move a constant pointer into r. In AArch64 mode the virtual 1968 // address space is 48 bits in size, so we only need three 1969 // instructions to create a patchable instruction sequence that can 1970 // reach anywhere.
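// As an illustrative example (values assumed, not taken from any call
// site): movptr(r, 0x123456789abcUL) expands to the fixed-length,
// patchable sequence
//   movz r, #0x9abc
//   movk r, #0x5678, lsl #16
//   movk r, #0x1234, lsl #32
// so each 16-bit field of the 48-bit constant can later be re-patched
// in place without changing the length of the sequence.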
1971 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 1972 #ifndef PRODUCT 1973 { 1974 char buffer[64]; 1975 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 1976 block_comment(buffer); 1977 } 1978 #endif 1979 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 1980 movz(r, imm64 & 0xffff); 1981 imm64 >>= 16; 1982 movk(r, imm64 & 0xffff, 16); 1983 imm64 >>= 16; 1984 movk(r, imm64 & 0xffff, 32); 1985 } 1986 1987 // Macro to mov replicated immediate to vector register. 1988 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 1989 // the upper 56/48/32 bits must be zeros for B/H/S type. 1990 // Vd will get the following values for different arrangements in T 1991 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 1992 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 1993 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 1994 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 1995 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 1996 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 1997 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 1998 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 1999 // Clobbers rscratch1 2000 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2001 assert(T != T1Q, "unsupported"); 2002 if (T == T1D || T == T2D) { 2003 int imm = operand_valid_for_movi_immediate(imm64, T); 2004 if (-1 != imm) { 2005 movi(Vd, T, imm); 2006 } else { 2007 mov(rscratch1, imm64); 2008 dup(Vd, T, rscratch1); 2009 } 2010 return; 2011 } 2012 2013 #ifdef ASSERT 2014 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2015 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2016 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2017 #endif 2018 int shift = operand_valid_for_movi_immediate(imm64, T); 2019 uint32_t imm32 = imm64 & 0xffffffffULL; 2020 if (shift >= 0) { 2021 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2022 } else { 2023 movw(rscratch1, imm32); 2024 dup(Vd, T, rscratch1); 2025 } 2026 } 2027 2028 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2029 { 2030 #ifndef PRODUCT 2031 { 2032 char buffer[64]; 2033 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2034 block_comment(buffer); 2035 } 2036 #endif 2037 if (operand_valid_for_logical_immediate(false, imm64)) { 2038 orr(dst, zr, imm64); 2039 } else { 2040 // we can use a combination of MOVZ or MOVN with 2041 // MOVK to build up the constant 2042 uint64_t imm_h[4]; 2043 int zero_count = 0; 2044 int neg_count = 0; 2045 int i; 2046 for (i = 0; i < 4; i++) { 2047 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2048 if (imm_h[i] == 0) { 2049 zero_count++; 2050 } else if (imm_h[i] == 0xffffL) { 2051 neg_count++; 2052 } 2053 } 2054 if (zero_count == 4) { 2055 // one MOVZ will do 2056 movz(dst, 0); 2057 } else if (neg_count == 4) { 2058 // one MOVN will do 2059 movn(dst, 0); 2060 } else if (zero_count == 3) { 2061 for (i = 0; i < 4; i++) { 2062 if (imm_h[i] != 0L) { 2063 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2064 break; 2065 } 2066 } 2067 } else if (neg_count == 3) { 2068 // one MOVN will do 2069 for (int i = 0; i < 4; i++) { 2070 if (imm_h[i] != 0xffffL) { 2071 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2072 break; 2073 } 2074 } 2075 } else if (zero_count == 2) { 2076 // one MOVZ and one MOVK will do 2077 for (i = 0; i < 3; 
i++) { 2078 if (imm_h[i] != 0L) { 2079 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2080 i++; 2081 break; 2082 } 2083 } 2084 for (;i < 4; i++) { 2085 if (imm_h[i] != 0L) { 2086 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2087 } 2088 } 2089 } else if (neg_count == 2) { 2090 // one MOVN and one MOVK will do 2091 for (i = 0; i < 4; i++) { 2092 if (imm_h[i] != 0xffffL) { 2093 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2094 i++; 2095 break; 2096 } 2097 } 2098 for (;i < 4; i++) { 2099 if (imm_h[i] != 0xffffL) { 2100 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2101 } 2102 } 2103 } else if (zero_count == 1) { 2104 // one MOVZ and two MOVKs will do 2105 for (i = 0; i < 4; i++) { 2106 if (imm_h[i] != 0L) { 2107 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2108 i++; 2109 break; 2110 } 2111 } 2112 for (;i < 4; i++) { 2113 if (imm_h[i] != 0x0L) { 2114 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2115 } 2116 } 2117 } else if (neg_count == 1) { 2118 // one MOVN and two MOVKs will do 2119 for (i = 0; i < 4; i++) { 2120 if (imm_h[i] != 0xffffL) { 2121 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2122 i++; 2123 break; 2124 } 2125 } 2126 for (;i < 4; i++) { 2127 if (imm_h[i] != 0xffffL) { 2128 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2129 } 2130 } 2131 } else { 2132 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2133 movz(dst, (uint32_t)imm_h[0], 0); 2134 for (i = 1; i < 4; i++) { 2135 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2136 } 2137 } 2138 } 2139 } 2140 2141 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2142 { 2143 #ifndef PRODUCT 2144 { 2145 char buffer[64]; 2146 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2147 block_comment(buffer); 2148 } 2149 #endif 2150 if (operand_valid_for_logical_immediate(true, imm32)) { 2151 orrw(dst, zr, imm32); 2152 } else { 2153 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2154 // constant 2155 uint32_t imm_h[2]; 2156 imm_h[0] = imm32 & 0xffff; 2157 imm_h[1] = ((imm32 >> 16) & 0xffff); 2158 if (imm_h[0] == 0) { 2159 movzw(dst, imm_h[1], 16); 2160 } else if (imm_h[0] == 0xffff) { 2161 movnw(dst, imm_h[1] ^ 0xffff, 16); 2162 } else if (imm_h[1] == 0) { 2163 movzw(dst, imm_h[0], 0); 2164 } else if (imm_h[1] == 0xffff) { 2165 movnw(dst, imm_h[0] ^ 0xffff, 0); 2166 } else { 2167 // use a MOVZ and MOVK (makes it easier to debug) 2168 movzw(dst, imm_h[0], 0); 2169 movkw(dst, imm_h[1], 16); 2170 } 2171 } 2172 } 2173 2174 // Form an address from base + offset in Rd. Rd may or may 2175 // not actually be used: you must use the Address that is returned. 2176 // It is up to you to ensure that the shift provided matches the size 2177 // of your data. 
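// Illustrative example (values assumed, not from any call site):
// form_address(Rd, base, 0x40008, 3) cannot encode 0x40008 as a scaled
// immediate, but the offset is positive and 8-byte aligned, so the
// two-part path below emits add(Rd, base, 0x40000) and returns
// Address(Rd, 8), which is reachable with an ordinary 12-bit scaled
// immediate.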
2178 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2179 if (Address::offset_ok_for_immed(byte_offset, shift)) 2180 // It fits; no need for any heroics 2181 return Address(base, byte_offset); 2182 2183 // Don't do anything clever with negative or misaligned offsets 2184 unsigned mask = (1 << shift) - 1; 2185 if (byte_offset < 0 || byte_offset & mask) { 2186 mov(Rd, byte_offset); 2187 add(Rd, base, Rd); 2188 return Address(Rd); 2189 } 2190 2191 // See if we can do this with two 12-bit offsets 2192 { 2193 uint64_t word_offset = byte_offset >> shift; 2194 uint64_t masked_offset = word_offset & 0xfff000; 2195 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2196 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2197 add(Rd, base, masked_offset << shift); 2198 word_offset -= masked_offset; 2199 return Address(Rd, word_offset << shift); 2200 } 2201 } 2202 2203 // Do it the hard way 2204 mov(Rd, byte_offset); 2205 add(Rd, base, Rd); 2206 return Address(Rd); 2207 } 2208 2209 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2210 bool want_remainder, Register scratch) 2211 { 2212 // Full implementation of Java idiv and irem. The function 2213 // returns the (pc) offset of the div instruction - may be needed 2214 // for implicit exceptions. 2215 // 2216 // constraint : ra/rb =/= scratch 2217 // normal case 2218 // 2219 // input : ra: dividend 2220 // rb: divisor 2221 // 2222 // result: either 2223 // quotient (= ra idiv rb) 2224 // remainder (= ra irem rb) 2225 2226 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2227 2228 int idivl_offset = offset(); 2229 if (! want_remainder) { 2230 sdivw(result, ra, rb); 2231 } else { 2232 sdivw(scratch, ra, rb); 2233 Assembler::msubw(result, scratch, rb, ra); 2234 } 2235 2236 return idivl_offset; 2237 } 2238 2239 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2240 bool want_remainder, Register scratch) 2241 { 2242 // Full implementation of Java ldiv and lrem. The function 2243 // returns the (pc) offset of the div instruction - may be needed 2244 // for implicit exceptions. 2245 // 2246 // constraint : ra/rb =/= scratch 2247 // normal case 2248 // 2249 // input : ra: dividend 2250 // rb: divisor 2251 // 2252 // result: either 2253 // quotient (= ra idiv rb) 2254 // remainder (= ra irem rb) 2255 2256 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2257 2258 int idivq_offset = offset(); 2259 if (! want_remainder) { 2260 sdiv(result, ra, rb); 2261 } else { 2262 sdiv(scratch, ra, rb); 2263 Assembler::msub(result, scratch, rb, ra); 2264 } 2265 2266 return idivq_offset; 2267 } 2268 2269 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2270 address prev = pc() - NativeMembar::instruction_size; 2271 address last = code()->last_insn(); 2272 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2273 NativeMembar *bar = NativeMembar_at(prev); 2274 // We are merging two memory barrier instructions. On AArch64 we 2275 // can do this simply by ORing them together. 
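// For example (illustrative): a dmb emitted for LoadLoad that is
// immediately followed by one for StoreStore collapses into a single
// barrier whose kind is LoadLoad|StoreStore, which subsumes both
// orderings.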
2276 bar->set_kind(bar->get_kind() | order_constraint); 2277 BLOCK_COMMENT("merged membar"); 2278 } else { 2279 code()->set_last_insn(pc()); 2280 dmb(Assembler::barrier(order_constraint)); 2281 } 2282 } 2283 2284 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) { 2285 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) { 2286 merge_ldst(rt, adr, size_in_bytes, is_store); 2287 code()->clear_last_insn(); 2288 return true; 2289 } else { 2290 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported."); 2291 const uint64_t mask = size_in_bytes - 1; 2292 if (adr.getMode() == Address::base_plus_offset && 2293 (adr.offset() & mask) == 0) { // only supports base_plus_offset. 2294 code()->set_last_insn(pc()); 2295 } 2296 return false; 2297 } 2298 } 2299 2300 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2301 // We always try to merge two adjacent loads into one ldp. 2302 if (!try_merge_ldst(Rx, adr, 8, false)) { 2303 Assembler::ldr(Rx, adr); 2304 } 2305 } 2306 2307 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2308 // We always try to merge two adjacent loads into one ldp. 2309 if (!try_merge_ldst(Rw, adr, 4, false)) { 2310 Assembler::ldrw(Rw, adr); 2311 } 2312 } 2313 2314 void MacroAssembler::str(Register Rx, const Address &adr) { 2315 // We always try to merge two adjacent stores into one stp. 2316 if (!try_merge_ldst(Rx, adr, 8, true)) { 2317 Assembler::str(Rx, adr); 2318 } 2319 } 2320 2321 void MacroAssembler::strw(Register Rw, const Address &adr) { 2322 // We always try to merge two adjacent stores into one stp. 2323 if (!try_merge_ldst(Rw, adr, 4, true)) { 2324 Assembler::strw(Rw, adr); 2325 } 2326 } 2327 2328 // MacroAssembler routines found actually to be needed 2329 2330 void MacroAssembler::push(Register src) 2331 { 2332 str(src, Address(pre(esp, -1 * wordSize))); 2333 } 2334 2335 void MacroAssembler::pop(Register dst) 2336 { 2337 ldr(dst, Address(post(esp, 1 * wordSize))); 2338 } 2339 2340 // Note: load_unsigned_short used to be called load_unsigned_word. 2341 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2342 int off = offset(); 2343 ldrh(dst, src); 2344 return off; 2345 } 2346 2347 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2348 int off = offset(); 2349 ldrb(dst, src); 2350 return off; 2351 } 2352 2353 int MacroAssembler::load_signed_short(Register dst, Address src) { 2354 int off = offset(); 2355 ldrsh(dst, src); 2356 return off; 2357 } 2358 2359 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2360 int off = offset(); 2361 ldrsb(dst, src); 2362 return off; 2363 } 2364 2365 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2366 int off = offset(); 2367 ldrshw(dst, src); 2368 return off; 2369 } 2370 2371 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2372 int off = offset(); 2373 ldrsbw(dst, src); 2374 return off; 2375 } 2376 2377 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2378 switch (size_in_bytes) { 2379 case 8: ldr(dst, src); break; 2380 case 4: ldrw(dst, src); break; 2381 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2382 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2383 default: ShouldNotReachHere(); 2384 } 2385 } 2386 2387 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2388 switch (size_in_bytes) { 2389 case 8: str(src, dst); break; 2390 case 4: strw(src, dst); break; 2391 case 2: strh(src, dst); break; 2392 case 1: strb(src, dst); break; 2393 default: ShouldNotReachHere(); 2394 } 2395 } 2396 2397 void MacroAssembler::decrementw(Register reg, int value) 2398 { 2399 if (value < 0) { incrementw(reg, -value); return; } 2400 if (value == 0) { return; } 2401 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2402 /* else */ { 2403 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2404 movw(rscratch2, (unsigned)value); 2405 subw(reg, reg, rscratch2); 2406 } 2407 } 2408 2409 void MacroAssembler::decrement(Register reg, int value) 2410 { 2411 if (value < 0) { increment(reg, -value); return; } 2412 if (value == 0) { return; } 2413 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2414 /* else */ { 2415 assert(reg != rscratch2, "invalid dst for register decrement"); 2416 mov(rscratch2, (uint64_t)value); 2417 sub(reg, reg, rscratch2); 2418 } 2419 } 2420 2421 void MacroAssembler::decrementw(Address dst, int value) 2422 { 2423 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2424 if (dst.getMode() == Address::literal) { 2425 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2426 lea(rscratch2, dst); 2427 dst = Address(rscratch2); 2428 } 2429 ldrw(rscratch1, dst); 2430 decrementw(rscratch1, value); 2431 strw(rscratch1, dst); 2432 } 2433 2434 void MacroAssembler::decrement(Address dst, int value) 2435 { 2436 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2437 if (dst.getMode() == Address::literal) { 2438 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2439 lea(rscratch2, dst); 2440 dst = Address(rscratch2); 2441 } 2442 ldr(rscratch1, dst); 2443 decrement(rscratch1, value); 2444 str(rscratch1, dst); 2445 } 2446 2447 void MacroAssembler::incrementw(Register reg, int value) 2448 { 2449 if (value < 0) { decrementw(reg, -value); return; } 2450 if (value == 0) { return; } 2451 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2452 /* else */ { 2453 assert(reg != rscratch2, "invalid dst for register increment"); 2454 movw(rscratch2, (unsigned)value); 2455 addw(reg, reg, rscratch2); 2456 } 2457 } 2458 2459 void MacroAssembler::increment(Register reg, int value) 2460 { 2461 if (value < 0) { decrement(reg, -value); return; } 2462 if (value == 0) { return; } 2463 if (value < (1 << 12)) { add(reg, reg, value); return; } 2464 /* else */ { 2465 assert(reg != rscratch2, "invalid dst for register increment"); 2466 movw(rscratch2, (unsigned)value); 2467 add(reg, reg, rscratch2); 2468 } 2469 } 2470 2471 void MacroAssembler::incrementw(Address dst, int value) 2472 { 2473 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2474 if (dst.getMode() == Address::literal) { 2475 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2476 lea(rscratch2, dst); 2477 dst = Address(rscratch2); 2478 } 2479 ldrw(rscratch1, dst); 2480 incrementw(rscratch1, value); 2481 strw(rscratch1, dst); 2482 } 2483 2484 void MacroAssembler::increment(Address dst, int value) 2485 { 2486 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2487 if (dst.getMode() == Address::literal) { 2488 assert(abs(value) < (1 << 12), 
"invalid value and address mode combination"); 2489 lea(rscratch2, dst); 2490 dst = Address(rscratch2); 2491 } 2492 ldr(rscratch1, dst); 2493 increment(rscratch1, value); 2494 str(rscratch1, dst); 2495 } 2496 2497 // Push lots of registers in the bit set supplied. Don't push sp. 2498 // Return the number of words pushed 2499 int MacroAssembler::push(unsigned int bitset, Register stack) { 2500 int words_pushed = 0; 2501 2502 // Scan bitset to accumulate register pairs 2503 unsigned char regs[32]; 2504 int count = 0; 2505 for (int reg = 0; reg <= 30; reg++) { 2506 if (1 & bitset) 2507 regs[count++] = reg; 2508 bitset >>= 1; 2509 } 2510 regs[count++] = zr->raw_encoding(); 2511 count &= ~1; // Only push an even number of regs 2512 2513 if (count) { 2514 stp(as_Register(regs[0]), as_Register(regs[1]), 2515 Address(pre(stack, -count * wordSize))); 2516 words_pushed += 2; 2517 } 2518 for (int i = 2; i < count; i += 2) { 2519 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2520 Address(stack, i * wordSize)); 2521 words_pushed += 2; 2522 } 2523 2524 assert(words_pushed == count, "oops, pushed != count"); 2525 2526 return count; 2527 } 2528 2529 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2530 int words_pushed = 0; 2531 2532 // Scan bitset to accumulate register pairs 2533 unsigned char regs[32]; 2534 int count = 0; 2535 for (int reg = 0; reg <= 30; reg++) { 2536 if (1 & bitset) 2537 regs[count++] = reg; 2538 bitset >>= 1; 2539 } 2540 regs[count++] = zr->raw_encoding(); 2541 count &= ~1; 2542 2543 for (int i = 2; i < count; i += 2) { 2544 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2545 Address(stack, i * wordSize)); 2546 words_pushed += 2; 2547 } 2548 if (count) { 2549 ldp(as_Register(regs[0]), as_Register(regs[1]), 2550 Address(post(stack, count * wordSize))); 2551 words_pushed += 2; 2552 } 2553 2554 assert(words_pushed == count, "oops, pushed != count"); 2555 2556 return count; 2557 } 2558 2559 // Push lots of registers in the bit set supplied. Don't push sp. 2560 // Return the number of dwords pushed 2561 int MacroAssembler::push_fp(unsigned int bitset, Register stack) { 2562 int words_pushed = 0; 2563 bool use_sve = false; 2564 int sve_vector_size_in_bytes = 0; 2565 2566 #ifdef COMPILER2 2567 use_sve = Matcher::supports_scalable_vector(); 2568 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2569 #endif 2570 2571 // Scan bitset to accumulate register pairs 2572 unsigned char regs[32]; 2573 int count = 0; 2574 for (int reg = 0; reg <= 31; reg++) { 2575 if (1 & bitset) 2576 regs[count++] = reg; 2577 bitset >>= 1; 2578 } 2579 2580 if (count == 0) { 2581 return 0; 2582 } 2583 2584 // SVE 2585 if (use_sve && sve_vector_size_in_bytes > 16) { 2586 sub(stack, stack, sve_vector_size_in_bytes * count); 2587 for (int i = 0; i < count; i++) { 2588 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2589 } 2590 return count * sve_vector_size_in_bytes / 8; 2591 } 2592 2593 // NEON 2594 if (count == 1) { 2595 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2596 return 2; 2597 } 2598 2599 bool odd = (count & 1) == 1; 2600 int push_slots = count + (odd ? 1 : 0); 2601 2602 // Always pushing full 128 bit registers. 
2603 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2604 words_pushed += 2; 2605 2606 for (int i = 2; i + 1 < count; i += 2) { 2607 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2608 words_pushed += 2; 2609 } 2610 2611 if (odd) { 2612 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2613 words_pushed++; 2614 } 2615 2616 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2617 return count * 2; 2618 } 2619 2620 // Return the number of dwords popped 2621 int MacroAssembler::pop_fp(unsigned int bitset, Register stack) { 2622 int words_pushed = 0; 2623 bool use_sve = false; 2624 int sve_vector_size_in_bytes = 0; 2625 2626 #ifdef COMPILER2 2627 use_sve = Matcher::supports_scalable_vector(); 2628 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2629 #endif 2630 // Scan bitset to accumulate register pairs 2631 unsigned char regs[32]; 2632 int count = 0; 2633 for (int reg = 0; reg <= 31; reg++) { 2634 if (1 & bitset) 2635 regs[count++] = reg; 2636 bitset >>= 1; 2637 } 2638 2639 if (count == 0) { 2640 return 0; 2641 } 2642 2643 // SVE 2644 if (use_sve && sve_vector_size_in_bytes > 16) { 2645 for (int i = count - 1; i >= 0; i--) { 2646 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2647 } 2648 add(stack, stack, sve_vector_size_in_bytes * count); 2649 return count * sve_vector_size_in_bytes / 8; 2650 } 2651 2652 // NEON 2653 if (count == 1) { 2654 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2655 return 2; 2656 } 2657 2658 bool odd = (count & 1) == 1; 2659 int push_slots = count + (odd ? 1 : 0); 2660 2661 if (odd) { 2662 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2663 words_pushed++; 2664 } 2665 2666 for (int i = 2; i + 1 < count; i += 2) { 2667 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2668 words_pushed += 2; 2669 } 2670 2671 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 2672 words_pushed += 2; 2673 2674 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2675 2676 return count * 2; 2677 } 2678 2679 // Return the number of dwords pushed 2680 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 2681 bool use_sve = false; 2682 int sve_predicate_size_in_slots = 0; 2683 2684 #ifdef COMPILER2 2685 use_sve = Matcher::supports_scalable_vector(); 2686 if (use_sve) { 2687 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2688 } 2689 #endif 2690 2691 if (!use_sve) { 2692 return 0; 2693 } 2694 2695 unsigned char regs[PRegister::number_of_registers]; 2696 int count = 0; 2697 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2698 if (1 & bitset) 2699 regs[count++] = reg; 2700 bitset >>= 1; 2701 } 2702 2703 if (count == 0) { 2704 return 0; 2705 } 2706 2707 int total_push_bytes = align_up(sve_predicate_size_in_slots * 2708 VMRegImpl::stack_slot_size * count, 16); 2709 sub(stack, stack, total_push_bytes); 2710 for (int i = 0; i < count; i++) { 2711 sve_str(as_PRegister(regs[i]), Address(stack, i)); 2712 } 2713 return total_push_bytes / 8; 2714 } 2715 2716 // Return the number of dwords popped 2717 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 2718 bool use_sve = false; 2719 int sve_predicate_size_in_slots = 0; 2720 2721 #ifdef 
COMPILER2 2722 use_sve = Matcher::supports_scalable_vector(); 2723 if (use_sve) { 2724 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 2725 } 2726 #endif 2727 2728 if (!use_sve) { 2729 return 0; 2730 } 2731 2732 unsigned char regs[PRegister::number_of_registers]; 2733 int count = 0; 2734 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 2735 if (1 & bitset) 2736 regs[count++] = reg; 2737 bitset >>= 1; 2738 } 2739 2740 if (count == 0) { 2741 return 0; 2742 } 2743 2744 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 2745 VMRegImpl::stack_slot_size * count, 16); 2746 for (int i = count - 1; i >= 0; i--) { 2747 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 2748 } 2749 add(stack, stack, total_pop_bytes); 2750 return total_pop_bytes / 8; 2751 } 2752 2753 #ifdef ASSERT 2754 void MacroAssembler::verify_heapbase(const char* msg) { 2755 #if 0 2756 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 2757 assert (Universe::heap() != nullptr, "java heap should be initialized"); 2758 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 2759 // rheapbase is allocated as general register 2760 return; 2761 } 2762 if (CheckCompressedOops) { 2763 Label ok; 2764 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 2765 cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2766 br(Assembler::EQ, ok); 2767 stop(msg); 2768 bind(ok); 2769 pop(1 << rscratch1->encoding(), sp); 2770 } 2771 #endif 2772 } 2773 #endif 2774 2775 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 2776 assert_different_registers(value, tmp1, tmp2); 2777 Label done, tagged, weak_tagged; 2778 2779 cbz(value, done); // Use null as-is. 2780 tst(value, JNIHandles::tag_mask); // Test for tag. 2781 br(Assembler::NE, tagged); 2782 2783 // Resolve local handle 2784 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 2785 verify_oop(value); 2786 b(done); 2787 2788 bind(tagged); 2789 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 2790 tbnz(value, 0, weak_tagged); // Test for weak tag. 2791 2792 // Resolve global handle 2793 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 2794 verify_oop(value); 2795 b(done); 2796 2797 bind(weak_tagged); 2798 // Resolve jweak. 2799 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 2800 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 2801 verify_oop(value); 2802 2803 bind(done); 2804 } 2805 2806 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 2807 assert_different_registers(value, tmp1, tmp2); 2808 Label done; 2809 2810 cbz(value, done); // Use null as-is. 
2811 2812 #ifdef ASSERT 2813 { 2814 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 2815 Label valid_global_tag; 2816 tbnz(value, 1, valid_global_tag); // Test for global tag 2817 stop("non global jobject using resolve_global_jobject"); 2818 bind(valid_global_tag); 2819 } 2820 #endif 2821 2822 // Resolve global handle 2823 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 2824 verify_oop(value); 2825 2826 bind(done); 2827 } 2828 2829 void MacroAssembler::stop(const char* msg) { 2830 BLOCK_COMMENT(msg); 2831 dcps1(0xdeae); 2832 emit_int64((uintptr_t)msg); 2833 } 2834 2835 void MacroAssembler::unimplemented(const char* what) { 2836 const char* buf = nullptr; 2837 { 2838 ResourceMark rm; 2839 stringStream ss; 2840 ss.print("unimplemented: %s", what); 2841 buf = code_string(ss.as_string()); 2842 } 2843 stop(buf); 2844 } 2845 2846 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 2847 #ifdef ASSERT 2848 Label OK; 2849 br(cc, OK); 2850 stop(msg); 2851 bind(OK); 2852 #endif 2853 } 2854 2855 // If a constant does not fit in an immediate field, generate some 2856 // number of MOV instructions and then perform the operation. 2857 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 2858 add_sub_imm_insn insn1, 2859 add_sub_reg_insn insn2, 2860 bool is32) { 2861 assert(Rd != zr, "Rd = zr and not setting flags?"); 2862 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 2863 if (fits) { 2864 (this->*insn1)(Rd, Rn, imm); 2865 } else { 2866 if (uabs(imm) < (1 << 24)) { 2867 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 2868 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 2869 } else { 2870 assert_different_registers(Rd, Rn); 2871 mov(Rd, imm); 2872 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 2873 } 2874 } 2875 } 2876 2877 // Separate vsn which sets the flags. Optimisations are more restricted 2878 // because we must set the flags correctly. 2879 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 2880 add_sub_imm_insn insn1, 2881 add_sub_reg_insn insn2, 2882 bool is32) { 2883 bool fits = operand_valid_for_add_sub_immediate(is32 ? 
(int32_t)imm : imm); 2884 if (fits) { 2885 (this->*insn1)(Rd, Rn, imm); 2886 } else { 2887 assert_different_registers(Rd, Rn); 2888 assert(Rd != zr, "overflow in immediate operand"); 2889 mov(Rd, imm); 2890 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 2891 } 2892 } 2893 2894 2895 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 2896 if (increment.is_register()) { 2897 add(Rd, Rn, increment.as_register()); 2898 } else { 2899 add(Rd, Rn, increment.as_constant()); 2900 } 2901 } 2902 2903 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 2904 if (increment.is_register()) { 2905 addw(Rd, Rn, increment.as_register()); 2906 } else { 2907 addw(Rd, Rn, increment.as_constant()); 2908 } 2909 } 2910 2911 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 2912 if (decrement.is_register()) { 2913 sub(Rd, Rn, decrement.as_register()); 2914 } else { 2915 sub(Rd, Rn, decrement.as_constant()); 2916 } 2917 } 2918 2919 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 2920 if (decrement.is_register()) { 2921 subw(Rd, Rn, decrement.as_register()); 2922 } else { 2923 subw(Rd, Rn, decrement.as_constant()); 2924 } 2925 } 2926 2927 void MacroAssembler::reinit_heapbase() 2928 { 2929 if (UseCompressedOops) { 2930 if (Universe::is_fully_initialized()) { 2931 mov(rheapbase, CompressedOops::ptrs_base()); 2932 } else { 2933 lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 2934 ldr(rheapbase, Address(rheapbase)); 2935 } 2936 } 2937 } 2938 2939 // this simulates the behaviour of the x86 cmpxchg instruction using a 2940 // load linked/store conditional pair. we use the acquire/release 2941 // versions of these instructions so that we flush pending writes as 2942 // per Java semantics. 2943 2944 // n.b the x86 version assumes the old value to be compared against is 2945 // in rax and updates rax with the value located in memory if the 2946 // cmpxchg fails. we supply a register for the old value explicitly 2947 2948 // the aarch64 load linked/store conditional instructions do not 2949 // accept an offset. so, unlike x86, we must provide a plain register 2950 // to identify the memory word to be compared/exchanged rather than a 2951 // register+offset Address. 2952 2953 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 2954 Label &succeed, Label *fail) { 2955 // oldv holds comparison value 2956 // newv holds value to write in exchange 2957 // addr identifies memory word to compare against/update 2958 if (UseLSE) { 2959 mov(tmp, oldv); 2960 casal(Assembler::xword, oldv, newv, addr); 2961 cmp(tmp, oldv); 2962 br(Assembler::EQ, succeed); 2963 membar(AnyAny); 2964 } else { 2965 Label retry_load, nope; 2966 prfm(Address(addr), PSTL1STRM); 2967 bind(retry_load); 2968 // flush and load exclusive from the memory location 2969 // and fail if it is not what we expect 2970 ldaxr(tmp, addr); 2971 cmp(tmp, oldv); 2972 br(Assembler::NE, nope); 2973 // if we store+flush with no intervening write tmp will be zero 2974 stlxr(tmp, newv, addr); 2975 cbzw(tmp, succeed); 2976 // retry so we only ever return after a load fails to compare 2977 // ensures we don't return a stale value after a failed write. 
2978 b(retry_load); 2979 // if the memory word differs we return it in oldv and signal a fail 2980 bind(nope); 2981 membar(AnyAny); 2982 mov(oldv, tmp); 2983 } 2984 if (fail) 2985 b(*fail); 2986 } 2987 2988 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 2989 Label &succeed, Label *fail) { 2990 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 2991 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 2992 } 2993 2994 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 2995 Label &succeed, Label *fail) { 2996 // oldv holds comparison value 2997 // newv holds value to write in exchange 2998 // addr identifies memory word to compare against/update 2999 // tmp returns 0/1 for success/failure 3000 if (UseLSE) { 3001 mov(tmp, oldv); 3002 casal(Assembler::word, oldv, newv, addr); 3003 cmp(tmp, oldv); 3004 br(Assembler::EQ, succeed); 3005 membar(AnyAny); 3006 } else { 3007 Label retry_load, nope; 3008 prfm(Address(addr), PSTL1STRM); 3009 bind(retry_load); 3010 // flush and load exclusive from the memory location 3011 // and fail if it is not what we expect 3012 ldaxrw(tmp, addr); 3013 cmp(tmp, oldv); 3014 br(Assembler::NE, nope); 3015 // if we store+flush with no intervening write tmp will be zero 3016 stlxrw(tmp, newv, addr); 3017 cbzw(tmp, succeed); 3018 // retry so we only ever return after a load fails to compare 3019 // ensures we don't return a stale value after a failed write. 3020 b(retry_load); 3021 // if the memory word differs we return it in oldv and signal a fail 3022 bind(nope); 3023 membar(AnyAny); 3024 mov(oldv, tmp); 3025 } 3026 if (fail) 3027 b(*fail); 3028 } 3029 3030 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3031 // doesn't retry and may fail spuriously. If the oldval is wanted, 3032 // Pass a register for the result, otherwise pass noreg. 3033 3034 // Clobbers rscratch1 3035 void MacroAssembler::cmpxchg(Register addr, Register expected, 3036 Register new_val, 3037 enum operand_size size, 3038 bool acquire, bool release, 3039 bool weak, 3040 Register result) { 3041 if (result == noreg) result = rscratch1; 3042 BLOCK_COMMENT("cmpxchg {"); 3043 if (UseLSE) { 3044 mov(result, expected); 3045 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3046 compare_eq(result, expected, size); 3047 #ifdef ASSERT 3048 // Poison rscratch1 which is written on !UseLSE branch 3049 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3050 #endif 3051 } else { 3052 Label retry_load, done; 3053 prfm(Address(addr), PSTL1STRM); 3054 bind(retry_load); 3055 load_exclusive(result, addr, size, acquire); 3056 compare_eq(result, expected, size); 3057 br(Assembler::NE, done); 3058 store_exclusive(rscratch1, new_val, addr, size, release); 3059 if (weak) { 3060 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3061 } else { 3062 cbnzw(rscratch1, retry_load); 3063 } 3064 bind(done); 3065 } 3066 BLOCK_COMMENT("} cmpxchg"); 3067 } 3068 3069 // A generic comparison. Only compares for equality, clobbers rscratch1. 
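// There is no sub-word cmp, so for halfword and byte operands equality
// is computed by XORing the operands into rscratch1 and and-ing with
// 0xffff or 0xff: the Z flag is then set exactly when the low 16 or 8
// bits match.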
3070 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3071 if (size == xword) { 3072 cmp(rm, rn); 3073 } else if (size == word) { 3074 cmpw(rm, rn); 3075 } else if (size == halfword) { 3076 eorw(rscratch1, rm, rn); 3077 ands(zr, rscratch1, 0xffff); 3078 } else if (size == byte) { 3079 eorw(rscratch1, rm, rn); 3080 ands(zr, rscratch1, 0xff); 3081 } else { 3082 ShouldNotReachHere(); 3083 } 3084 } 3085 3086 3087 static bool different(Register a, RegisterOrConstant b, Register c) { 3088 if (b.is_constant()) 3089 return a != c; 3090 else 3091 return a != b.as_register() && a != c && b.as_register() != c; 3092 } 3093 3094 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3095 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3096 if (UseLSE) { \ 3097 prev = prev->is_valid() ? prev : zr; \ 3098 if (incr.is_register()) { \ 3099 AOP(sz, incr.as_register(), prev, addr); \ 3100 } else { \ 3101 mov(rscratch2, incr.as_constant()); \ 3102 AOP(sz, rscratch2, prev, addr); \ 3103 } \ 3104 return; \ 3105 } \ 3106 Register result = rscratch2; \ 3107 if (prev->is_valid()) \ 3108 result = different(prev, incr, addr) ? prev : rscratch2; \ 3109 \ 3110 Label retry_load; \ 3111 prfm(Address(addr), PSTL1STRM); \ 3112 bind(retry_load); \ 3113 LDXR(result, addr); \ 3114 OP(rscratch1, result, incr); \ 3115 STXR(rscratch2, rscratch1, addr); \ 3116 cbnzw(rscratch2, retry_load); \ 3117 if (prev->is_valid() && prev != result) { \ 3118 IOP(prev, rscratch1, incr); \ 3119 } \ 3120 } 3121 3122 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3123 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3124 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3125 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3126 3127 #undef ATOMIC_OP 3128 3129 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3130 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3131 if (UseLSE) { \ 3132 prev = prev->is_valid() ? prev : zr; \ 3133 AOP(sz, newv, prev, addr); \ 3134 return; \ 3135 } \ 3136 Register result = rscratch2; \ 3137 if (prev->is_valid()) \ 3138 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 3139 \ 3140 Label retry_load; \ 3141 prfm(Address(addr), PSTL1STRM); \ 3142 bind(retry_load); \ 3143 LDXR(result, addr); \ 3144 STXR(rscratch1, newv, addr); \ 3145 cbnzw(rscratch1, retry_load); \ 3146 if (prev->is_valid() && prev != result) \ 3147 mov(prev, result); \ 3148 } 3149 3150 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) 3151 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) 3152 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) 3153 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) 3154 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) 3155 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) 3156 3157 #undef ATOMIC_XCHG 3158 3159 #ifndef PRODUCT 3160 extern "C" void findpc(intptr_t x); 3161 #endif 3162 3163 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 3164 { 3165 // In order to get locks to work, we need to fake an in_VM state 3166 if (ShowMessageBoxOnError) { 3167 JavaThread* thread = JavaThread::current(); 3168 JavaThreadState saved_state = thread->thread_state(); 3169 thread->set_thread_state(_thread_in_vm); 3170 #ifndef PRODUCT 3171 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 3172 ttyLocker ttyl; 3173 BytecodeCounter::print(); 3174 } 3175 #endif 3176 if (os::message_box(msg, "Execution stopped, print registers?")) { 3177 ttyLocker ttyl; 3178 tty->print_cr(" pc = 0x%016" PRIx64, pc); 3179 #ifndef PRODUCT 3180 tty->cr(); 3181 findpc(pc); 3182 tty->cr(); 3183 #endif 3184 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]); 3185 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]); 3186 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]); 3187 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]); 3188 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]); 3189 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]); 3190 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]); 3191 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]); 3192 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]); 3193 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]); 3194 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]); 3195 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]); 3196 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]); 3197 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]); 3198 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]); 3199 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]); 3200 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]); 3201 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]); 3202 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]); 3203 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]); 3204 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]); 3205 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]); 3206 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]); 3207 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]); 3208 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]); 3209 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]); 3210 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]); 3211 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]); 3212 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]); 3213 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]); 3214 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]); 3215 BREAKPOINT; 3216 } 3217 } 3218 fatal("DEBUG MESSAGE: %s", msg); 3219 } 3220 3221 RegSet MacroAssembler::call_clobbered_gp_registers() { 3222 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2); 3223 #ifndef R18_RESERVED 3224 regs += r18_tls; 3225 #endif 3226 return regs; 3227 } 3228 3229 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { 3230 int step = 4 *
wordSize; 3231 push(call_clobbered_gp_registers() - exclude, sp); 3232 sub(sp, sp, step); 3233 mov(rscratch1, -step); 3234 // Push v0-v7, v16-v31. 3235 for (int i = 31; i>= 4; i -= 4) { 3236 if (i <= v7->encoding() || i >= v16->encoding()) 3237 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3238 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3239 } 3240 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3241 as_FloatRegister(3), T1D, Address(sp)); 3242 } 3243 3244 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3245 for (int i = 0; i < 32; i += 4) { 3246 if (i <= v7->encoding() || i >= v16->encoding()) 3247 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3248 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3249 } 3250 3251 reinitialize_ptrue(); 3252 3253 pop(call_clobbered_gp_registers() - exclude, sp); 3254 } 3255 3256 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3257 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3258 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3259 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3260 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3261 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3262 sve_str(as_FloatRegister(i), Address(sp, i)); 3263 } 3264 } else { 3265 int step = (save_vectors ? 8 : 4) * wordSize; 3266 mov(rscratch1, -step); 3267 sub(sp, sp, step); 3268 for (int i = 28; i >= 4; i -= 4) { 3269 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3270 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3271 } 3272 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3273 } 3274 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3275 sub(sp, sp, total_predicate_in_bytes); 3276 for (int i = 0; i < PRegister::number_of_registers; i++) { 3277 sve_str(as_PRegister(i), Address(sp, i)); 3278 } 3279 } 3280 } 3281 3282 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3283 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3284 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3285 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3286 sve_ldr(as_PRegister(i), Address(sp, i)); 3287 } 3288 add(sp, sp, total_predicate_in_bytes); 3289 } 3290 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3291 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3292 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3293 } 3294 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3295 } else { 3296 int step = (restore_vectors ? 8 : 4) * wordSize; 3297 for (int i = 0; i <= 28; i += 4) 3298 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3299 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3300 } 3301 3302 // We may use predicate registers and rely on ptrue with SVE, 3303 // regardless of wide vector (> 8 bytes) used or not. 3304 if (use_sve) { 3305 reinitialize_ptrue(); 3306 } 3307 3308 // integer registers except lr & sp 3309 pop(RegSet::range(r0, r17), sp); 3310 #ifdef R18_RESERVED 3311 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3312 pop(RegSet::range(r20, r29), sp); 3313 #else 3314 pop(RegSet::range(r18_tls, r29), sp); 3315 #endif 3316 } 3317 3318 /** 3319 * Helpers for multiply_to_len(). 
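 * add2_with_carry below adds src1 and then src2 into the 128-bit
 * quantity dest_hi:dest_lo, leaving the final high word in final_dest_hi;
 * it is pure carry propagation, with no multiply involved.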
3320 */
3321 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3322 Register src1, Register src2) {
3323 adds(dest_lo, dest_lo, src1);
3324 adc(dest_hi, dest_hi, zr);
3325 adds(dest_lo, dest_lo, src2);
3326 adc(final_dest_hi, dest_hi, zr);
3327 }
3328
3329 // Generate an address from (r + r1 extend offset). "size" is the
3330 // size of the operand. The result may be in rscratch2.
3331 Address MacroAssembler::offsetted_address(Register r, Register r1,
3332 Address::extend ext, int offset, int size) {
3333 if (offset || (ext.shift() % size != 0)) {
3334 lea(rscratch2, Address(r, r1, ext));
3335 return Address(rscratch2, offset);
3336 } else {
3337 return Address(r, r1, ext);
3338 }
3339 }
3340
3341 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3342 {
3343 assert(offset >= 0, "spill to negative address?");
3344 // Offset reachable?
3345 // Not aligned - 9 bits signed offset
3346 // Aligned - 12 bits unsigned offset shifted
3347 Register base = sp;
3348 if ((offset & (size-1)) && offset >= (1<<8)) {
3349 add(tmp, base, offset & ((1<<12)-1));
3350 base = tmp;
3351 offset &= -1u<<12;
3352 }
3353
3354 if (offset >= (1<<12) * size) {
3355 add(tmp, base, offset & (((1<<12)-1)<<12));
3356 base = tmp;
3357 offset &= ~(((1<<12)-1)<<12);
3358 }
3359
3360 return Address(base, offset);
3361 }
3362
3363 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3364 assert(offset >= 0, "spill to negative address?");
3365
3366 Register base = sp;
3367
3368 // An immediate offset in the range 0 to 255 which is multiplied
3369 // by the current vector or predicate register size in bytes.
3370 if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3371 return Address(base, offset / sve_reg_size_in_bytes);
3372 }
3373
3374 add(tmp, base, offset);
3375 return Address(tmp);
3376 }
3377
3378 // Checks whether offset is aligned.
3379 // Returns true if it is, else false.
3380 bool MacroAssembler::merge_alignment_check(Register base,
3381 size_t size,
3382 int64_t cur_offset,
3383 int64_t prev_offset) const {
3384 if (AvoidUnalignedAccesses) {
3385 if (base == sp) {
3386 // Checks whether the low offset is aligned to a pair of registers.
3387 int64_t pair_mask = size * 2 - 1;
3388 int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3389 return (offset & pair_mask) == 0;
3390 } else { // If base is not sp, we can't guarantee the access is aligned.
3391 return false;
3392 }
3393 } else {
3394 int64_t mask = size - 1;
3395 // The load/store pair instruction only supports element-size aligned offsets.
3396 return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3397 }
3398 }
3399
3400 // Checks whether current and previous loads/stores can be merged.
3401 // Returns true if it can be merged, else false.
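// An illustrative sketch (register numbers are arbitrary): the pair
//   ldr x1, [sp, #8]
//   ldr x2, [sp, #16]
// is a merge candidate and could be rewritten as
//   ldp x1, x2, [sp, #8]
// provided both accesses have the same size and base, the offsets are
// adjacent, the low offset is within ldp/stp range, and
// merge_alignment_check passes.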
3402 bool MacroAssembler::ldst_can_merge(Register rt,
3403 const Address &adr,
3404 size_t cur_size_in_bytes,
3405 bool is_store) const {
3406 address prev = pc() - NativeInstruction::instruction_size;
3407 address last = code()->last_insn();
3408
3409 if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3410 return false;
3411 }
3412
3413 if (adr.getMode() != Address::base_plus_offset || prev != last) {
3414 return false;
3415 }
3416
3417 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3418 size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3419
3420 assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3421 assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3422
3423 if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3424 return false;
3425 }
3426
3427 int64_t max_offset = 63 * prev_size_in_bytes;
3428 int64_t min_offset = -64 * prev_size_in_bytes;
3429
3430 assert(prev_ldst->is_not_pre_post_index(), "merging pre-indexed or post-indexed accesses is not supported.");
3431
3432 // Only same base can be merged.
3433 if (adr.base() != prev_ldst->base()) {
3434 return false;
3435 }
3436
3437 int64_t cur_offset = adr.offset();
3438 int64_t prev_offset = prev_ldst->offset();
3439 size_t diff = abs(cur_offset - prev_offset);
3440 if (diff != prev_size_in_bytes) {
3441 return false;
3442 }
3443
3444 // The following cases cannot be merged:
3445 // ldr x2, [x2, #8]
3446 // ldr x3, [x2, #16]
3447 // or:
3448 // ldr x2, [x3, #8]
3449 // ldr x2, [x3, #16]
3450 // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3451 if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3452 return false;
3453 }
3454
3455 int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3456 // Offset range must be in ldp/stp instruction's range.
3457 if (low_offset > max_offset || low_offset < min_offset) {
3458 return false;
3459 }
3460
3461 if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3462 return true;
3463 }
3464
3465 return false;
3466 }
3467
3468 // Merge current load/store with previous load/store into ldp/stp.
3469 void MacroAssembler::merge_ldst(Register rt,
3470 const Address &adr,
3471 size_t cur_size_in_bytes,
3472 bool is_store) {
3473
3474 assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3475
3476 Register rt_low, rt_high;
3477 address prev = pc() - NativeInstruction::instruction_size;
3478 NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3479
3480 int64_t offset;
3481
3482 if (adr.offset() < prev_ldst->offset()) {
3483 offset = adr.offset();
3484 rt_low = rt;
3485 rt_high = prev_ldst->target();
3486 } else {
3487 offset = prev_ldst->offset();
3488 rt_low = prev_ldst->target();
3489 rt_high = rt;
3490 }
3491
3492 Address adr_p = Address(prev_ldst->base(), offset);
3493 // Overwrite previously generated binary.
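// The code section end is wound back over the last emitted load/store, so
// the single ldp/stp emitted below replaces it and covers both accesses.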
3494 code_section()->set_end(prev);
3495
3496 const size_t sz = prev_ldst->size_in_bytes();
3497 assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3498 if (!is_store) {
3499 BLOCK_COMMENT("merged ldr pair");
3500 if (sz == 8) {
3501 ldp(rt_low, rt_high, adr_p);
3502 } else {
3503 ldpw(rt_low, rt_high, adr_p);
3504 }
3505 } else {
3506 BLOCK_COMMENT("merged str pair");
3507 if (sz == 8) {
3508 stp(rt_low, rt_high, adr_p);
3509 } else {
3510 stpw(rt_low, rt_high, adr_p);
3511 }
3512 }
3513 }
3514
3515 /**
3516 * Multiply 64 bit by 64 bit first loop.
3517 */
3518 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3519 Register y, Register y_idx, Register z,
3520 Register carry, Register product,
3521 Register idx, Register kdx) {
3522 //
3523 // jlong carry, x[], y[], z[];
3524 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3525 // huge_128 product = y[idx] * x[xstart] + carry;
3526 // z[kdx] = (jlong)product;
3527 // carry = (jlong)(product >>> 64);
3528 // }
3529 // z[xstart] = carry;
3530 //
3531
3532 Label L_first_loop, L_first_loop_exit;
3533 Label L_one_x, L_one_y, L_multiply;
3534
3535 subsw(xstart, xstart, 1);
3536 br(Assembler::MI, L_one_x);
3537
3538 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3539 ldr(x_xstart, Address(rscratch1));
3540 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3541
3542 bind(L_first_loop);
3543 subsw(idx, idx, 1);
3544 br(Assembler::MI, L_first_loop_exit);
3545 subsw(idx, idx, 1);
3546 br(Assembler::MI, L_one_y);
3547 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3548 ldr(y_idx, Address(rscratch1));
3549 ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3550 bind(L_multiply);
3551
3552 // AArch64 has a multiply-accumulate instruction that we can't use
3553 // here because it has no way to process carries, so we have to use
3554 // separate add and adc instructions. Bah.
3555 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3556 mul(product, x_xstart, y_idx);
3557 adds(product, product, carry);
3558 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
3559
3560 subw(kdx, kdx, 2);
3561 ror(product, product, 32); // back to big-endian
3562 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3563
3564 b(L_first_loop);
3565
3566 bind(L_one_y);
3567 ldrw(y_idx, Address(y, 0));
3568 b(L_multiply);
3569
3570 bind(L_one_x);
3571 ldrw(x_xstart, Address(x, 0));
3572 b(L_first_loop);
3573
3574 bind(L_first_loop_exit);
3575 }
3576
3577 /**
3578 * Multiply 128 bit by 128 bit. Unrolled inner loop.
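 * Each iteration of the unrolled loop consumes four 32-bit words of y
 * (two 64-bit limbs) and writes four words of z.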
3579 * 3580 */ 3581 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3582 Register carry, Register carry2, 3583 Register idx, Register jdx, 3584 Register yz_idx1, Register yz_idx2, 3585 Register tmp, Register tmp3, Register tmp4, 3586 Register tmp6, Register product_hi) { 3587 3588 // jlong carry, x[], y[], z[]; 3589 // int kdx = ystart+1; 3590 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3591 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3592 // jlong carry2 = (jlong)(tmp3 >>> 64); 3593 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3594 // carry = (jlong)(tmp4 >>> 64); 3595 // z[kdx+idx+1] = (jlong)tmp3; 3596 // z[kdx+idx] = (jlong)tmp4; 3597 // } 3598 // idx += 2; 3599 // if (idx > 0) { 3600 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3601 // z[kdx+idx] = (jlong)yz_idx1; 3602 // carry = (jlong)(yz_idx1 >>> 64); 3603 // } 3604 // 3605 3606 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3607 3608 lsrw(jdx, idx, 2); 3609 3610 bind(L_third_loop); 3611 3612 subsw(jdx, jdx, 1); 3613 br(Assembler::MI, L_third_loop_exit); 3614 subw(idx, idx, 4); 3615 3616 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3617 3618 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3619 3620 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3621 3622 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3623 ror(yz_idx2, yz_idx2, 32); 3624 3625 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3626 3627 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3628 umulh(tmp4, product_hi, yz_idx1); 3629 3630 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3631 ror(rscratch2, rscratch2, 32); 3632 3633 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3634 umulh(carry2, product_hi, yz_idx2); 3635 3636 // propagate sum of both multiplications into carry:tmp4:tmp3 3637 adds(tmp3, tmp3, carry); 3638 adc(tmp4, tmp4, zr); 3639 adds(tmp3, tmp3, rscratch1); 3640 adcs(tmp4, tmp4, tmp); 3641 adc(carry, carry2, zr); 3642 adds(tmp4, tmp4, rscratch2); 3643 adc(carry, carry, zr); 3644 3645 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 3646 ror(tmp4, tmp4, 32); 3647 stp(tmp4, tmp3, Address(tmp6, 0)); 3648 3649 b(L_third_loop); 3650 bind (L_third_loop_exit); 3651 3652 andw (idx, idx, 0x3); 3653 cbz(idx, L_post_third_loop_done); 3654 3655 Label L_check_1; 3656 subsw(idx, idx, 2); 3657 br(Assembler::MI, L_check_1); 3658 3659 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3660 ldr(yz_idx1, Address(rscratch1, 0)); 3661 ror(yz_idx1, yz_idx1, 32); 3662 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3663 umulh(tmp4, product_hi, yz_idx1); 3664 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3665 ldr(yz_idx2, Address(rscratch1, 0)); 3666 ror(yz_idx2, yz_idx2, 32); 3667 3668 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 3669 3670 ror(tmp3, tmp3, 32); 3671 str(tmp3, Address(rscratch1, 0)); 3672 3673 bind (L_check_1); 3674 3675 andw (idx, idx, 0x1); 3676 subsw(idx, idx, 1); 3677 br(Assembler::MI, L_post_third_loop_done); 3678 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3679 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 3680 umulh(carry2, tmp4, product_hi); 3681 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3682 3683 add2_with_carry(carry2, tmp3, tmp4, carry); 3684 3685 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3686 
extr(carry, carry2, tmp3, 32);
3687
3688 bind(L_post_third_loop_done);
3689 }
3690
3691 /**
3692 * Code for BigInteger::multiplyToLen() intrinsic.
3693 *
3694 * r0: x
3695 * r1: xlen
3696 * r2: y
3697 * r3: ylen
3698 * r4: z
3699 * r5: zlen
3700 * r10: tmp1
3701 * r11: tmp2
3702 * r12: tmp3
3703 * r13: tmp4
3704 * r14: tmp5
3705 * r15: tmp6
3706 * r16: tmp7
3707 *
3708 */
3709 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
3710 Register z, Register zlen,
3711 Register tmp1, Register tmp2, Register tmp3, Register tmp4,
3712 Register tmp5, Register tmp6, Register product_hi) {
3713
3714 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
3715
3716 const Register idx = tmp1;
3717 const Register kdx = tmp2;
3718 const Register xstart = tmp3;
3719
3720 const Register y_idx = tmp4;
3721 const Register carry = tmp5;
3722 const Register product = xlen;
3723 const Register x_xstart = zlen; // reuse register
3724
3725 // First Loop.
3726 //
3727 // final static long LONG_MASK = 0xffffffffL;
3728 // int xstart = xlen - 1;
3729 // int ystart = ylen - 1;
3730 // long carry = 0;
3731 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3732 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
3733 // z[kdx] = (int)product;
3734 // carry = product >>> 32;
3735 // }
3736 // z[xstart] = (int)carry;
3737 //
3738
3739 movw(idx, ylen); // idx = ylen;
3740 movw(kdx, zlen); // kdx = xlen+ylen;
3741 mov(carry, zr); // carry = 0;
3742
3743 Label L_done;
3744
3745 movw(xstart, xlen);
3746 subsw(xstart, xstart, 1);
3747 br(Assembler::MI, L_done);
3748
3749 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
3750
3751 Label L_second_loop;
3752 cbzw(kdx, L_second_loop);
3753
3754 Label L_carry;
3755 subw(kdx, kdx, 1);
3756 cbzw(kdx, L_carry);
3757
3758 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3759 lsr(carry, carry, 32);
3760 subw(kdx, kdx, 1);
3761
3762 bind(L_carry);
3763 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3764
3765 // Second and third (nested) loops.
3766 // 3767 // for (int i = xstart-1; i >= 0; i--) { // Second loop 3768 // carry = 0; 3769 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 3770 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 3771 // (z[k] & LONG_MASK) + carry; 3772 // z[k] = (int)product; 3773 // carry = product >>> 32; 3774 // } 3775 // z[i] = (int)carry; 3776 // } 3777 // 3778 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 3779 3780 const Register jdx = tmp1; 3781 3782 bind(L_second_loop); 3783 mov(carry, zr); // carry = 0; 3784 movw(jdx, ylen); // j = ystart+1 3785 3786 subsw(xstart, xstart, 1); // i = xstart-1; 3787 br(Assembler::MI, L_done); 3788 3789 str(z, Address(pre(sp, -4 * wordSize))); 3790 3791 Label L_last_x; 3792 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 3793 subsw(xstart, xstart, 1); // i = xstart-1; 3794 br(Assembler::MI, L_last_x); 3795 3796 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 3797 ldr(product_hi, Address(rscratch1)); 3798 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 3799 3800 Label L_third_loop_prologue; 3801 bind(L_third_loop_prologue); 3802 3803 str(ylen, Address(sp, wordSize)); 3804 stp(x, xstart, Address(sp, 2 * wordSize)); 3805 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 3806 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 3807 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 3808 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 3809 3810 addw(tmp3, xlen, 1); 3811 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 3812 subsw(tmp3, tmp3, 1); 3813 br(Assembler::MI, L_done); 3814 3815 lsr(carry, carry, 32); 3816 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 3817 b(L_second_loop); 3818 3819 // Next infrequent code is moved outside loops. 3820 bind(L_last_x); 3821 ldrw(product_hi, Address(x, 0)); 3822 b(L_third_loop_prologue); 3823 3824 bind(L_done); 3825 } 3826 3827 // Code for BigInteger::mulAdd intrinsic 3828 // out = r0 3829 // in = r1 3830 // offset = r2 (already out.length-offset) 3831 // len = r3 3832 // k = r4 3833 // 3834 // pseudo code from java implementation: 3835 // carry = 0; 3836 // offset = out.length-offset - 1; 3837 // for (int j=len-1; j >= 0; j--) { 3838 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 3839 // out[offset--] = (int)product; 3840 // carry = product >>> 32; 3841 // } 3842 // return (int)carry; 3843 void MacroAssembler::mul_add(Register out, Register in, Register offset, 3844 Register len, Register k) { 3845 Label LOOP, END; 3846 // pre-loop 3847 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 3848 csel(out, zr, out, Assembler::EQ); 3849 br(Assembler::EQ, END); 3850 add(in, in, len, LSL, 2); // in[j+1] address 3851 add(offset, out, offset, LSL, 2); // out[offset + 1] address 3852 mov(out, zr); // used to keep carry now 3853 BIND(LOOP); 3854 ldrw(rscratch1, Address(pre(in, -4))); 3855 madd(rscratch1, rscratch1, k, out); 3856 ldrw(rscratch2, Address(pre(offset, -4))); 3857 add(rscratch1, rscratch1, rscratch2); 3858 strw(rscratch1, Address(offset)); 3859 lsr(out, rscratch1, 32); 3860 subs(len, len, 1); 3861 br(Assembler::NE, LOOP); 3862 BIND(END); 3863 } 3864 3865 /** 3866 * Emits code to update CRC-32 with a byte value according to constants in table 3867 * 3868 * @param [in,out]crc Register containing the crc. 
3869 * @param [in]val Register containing the byte to fold into the CRC. 3870 * @param [in]table Register containing the table of crc constants. 3871 * 3872 * uint32_t crc; 3873 * val = crc_table[(val ^ crc) & 0xFF]; 3874 * crc = val ^ (crc >> 8); 3875 * 3876 */ 3877 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 3878 eor(val, val, crc); 3879 andr(val, val, 0xff); 3880 ldrw(val, Address(table, val, Address::lsl(2))); 3881 eor(crc, val, crc, Assembler::LSR, 8); 3882 } 3883 3884 /** 3885 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 3886 * 3887 * @param [in,out]crc Register containing the crc. 3888 * @param [in]v Register containing the 32-bit to fold into the CRC. 3889 * @param [in]table0 Register containing table 0 of crc constants. 3890 * @param [in]table1 Register containing table 1 of crc constants. 3891 * @param [in]table2 Register containing table 2 of crc constants. 3892 * @param [in]table3 Register containing table 3 of crc constants. 3893 * 3894 * uint32_t crc; 3895 * v = crc ^ v 3896 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 3897 * 3898 */ 3899 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 3900 Register table0, Register table1, Register table2, Register table3, 3901 bool upper) { 3902 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 3903 uxtb(tmp, v); 3904 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 3905 ubfx(tmp, v, 8, 8); 3906 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 3907 eor(crc, crc, tmp); 3908 ubfx(tmp, v, 16, 8); 3909 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 3910 eor(crc, crc, tmp); 3911 ubfx(tmp, v, 24, 8); 3912 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 3913 eor(crc, crc, tmp); 3914 } 3915 3916 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 3917 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 3918 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 3919 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 3920 3921 subs(tmp0, len, 384); 3922 mvnw(crc, crc); 3923 br(Assembler::GE, CRC_by128_pre); 3924 BIND(CRC_less128); 3925 subs(len, len, 32); 3926 br(Assembler::GE, CRC_by32_loop); 3927 BIND(CRC_less32); 3928 adds(len, len, 32 - 4); 3929 br(Assembler::GE, CRC_by4_loop); 3930 adds(len, len, 4); 3931 br(Assembler::GT, CRC_by1_loop); 3932 b(L_exit); 3933 3934 BIND(CRC_by32_loop); 3935 ldp(tmp0, tmp1, Address(buf)); 3936 crc32x(crc, crc, tmp0); 3937 ldp(tmp2, tmp3, Address(buf, 16)); 3938 crc32x(crc, crc, tmp1); 3939 add(buf, buf, 32); 3940 crc32x(crc, crc, tmp2); 3941 subs(len, len, 32); 3942 crc32x(crc, crc, tmp3); 3943 br(Assembler::GE, CRC_by32_loop); 3944 cmn(len, (u1)32); 3945 br(Assembler::NE, CRC_less32); 3946 b(L_exit); 3947 3948 BIND(CRC_by4_loop); 3949 ldrw(tmp0, Address(post(buf, 4))); 3950 subs(len, len, 4); 3951 crc32w(crc, crc, tmp0); 3952 br(Assembler::GE, CRC_by4_loop); 3953 adds(len, len, 4); 3954 br(Assembler::LE, L_exit); 3955 BIND(CRC_by1_loop); 3956 ldrb(tmp0, Address(post(buf, 1))); 3957 subs(len, len, 1); 3958 crc32b(crc, crc, tmp0); 3959 br(Assembler::GT, CRC_by1_loop); 3960 b(L_exit); 3961 3962 BIND(CRC_by128_pre); 3963 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 3964 4*256*sizeof(juint) + 8*sizeof(juint)); 3965 mov(crc, 0); 3966 crc32x(crc, crc, tmp0); 3967 crc32x(crc, crc, tmp1); 3968 3969 cbnz(len, CRC_less128); 3970 3971 BIND(L_exit); 
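// The CRC is kept bit-inverted while folding; undo the inversion on exit.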
3972 mvnw(crc, crc); 3973 } 3974 3975 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 3976 Register len, Register tmp0, Register tmp1, Register tmp2, 3977 Register tmp3) { 3978 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 3979 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 3980 3981 mvnw(crc, crc); 3982 3983 subs(len, len, 128); 3984 br(Assembler::GE, CRC_by64_pre); 3985 BIND(CRC_less64); 3986 adds(len, len, 128-32); 3987 br(Assembler::GE, CRC_by32_loop); 3988 BIND(CRC_less32); 3989 adds(len, len, 32-4); 3990 br(Assembler::GE, CRC_by4_loop); 3991 adds(len, len, 4); 3992 br(Assembler::GT, CRC_by1_loop); 3993 b(L_exit); 3994 3995 BIND(CRC_by32_loop); 3996 ldp(tmp0, tmp1, Address(post(buf, 16))); 3997 subs(len, len, 32); 3998 crc32x(crc, crc, tmp0); 3999 ldr(tmp2, Address(post(buf, 8))); 4000 crc32x(crc, crc, tmp1); 4001 ldr(tmp3, Address(post(buf, 8))); 4002 crc32x(crc, crc, tmp2); 4003 crc32x(crc, crc, tmp3); 4004 br(Assembler::GE, CRC_by32_loop); 4005 cmn(len, (u1)32); 4006 br(Assembler::NE, CRC_less32); 4007 b(L_exit); 4008 4009 BIND(CRC_by4_loop); 4010 ldrw(tmp0, Address(post(buf, 4))); 4011 subs(len, len, 4); 4012 crc32w(crc, crc, tmp0); 4013 br(Assembler::GE, CRC_by4_loop); 4014 adds(len, len, 4); 4015 br(Assembler::LE, L_exit); 4016 BIND(CRC_by1_loop); 4017 ldrb(tmp0, Address(post(buf, 1))); 4018 subs(len, len, 1); 4019 crc32b(crc, crc, tmp0); 4020 br(Assembler::GT, CRC_by1_loop); 4021 b(L_exit); 4022 4023 BIND(CRC_by64_pre); 4024 sub(buf, buf, 8); 4025 ldp(tmp0, tmp1, Address(buf, 8)); 4026 crc32x(crc, crc, tmp0); 4027 ldr(tmp2, Address(buf, 24)); 4028 crc32x(crc, crc, tmp1); 4029 ldr(tmp3, Address(buf, 32)); 4030 crc32x(crc, crc, tmp2); 4031 ldr(tmp0, Address(buf, 40)); 4032 crc32x(crc, crc, tmp3); 4033 ldr(tmp1, Address(buf, 48)); 4034 crc32x(crc, crc, tmp0); 4035 ldr(tmp2, Address(buf, 56)); 4036 crc32x(crc, crc, tmp1); 4037 ldr(tmp3, Address(pre(buf, 64))); 4038 4039 b(CRC_by64_loop); 4040 4041 align(CodeEntryAlignment); 4042 BIND(CRC_by64_loop); 4043 subs(len, len, 64); 4044 crc32x(crc, crc, tmp2); 4045 ldr(tmp0, Address(buf, 8)); 4046 crc32x(crc, crc, tmp3); 4047 ldr(tmp1, Address(buf, 16)); 4048 crc32x(crc, crc, tmp0); 4049 ldr(tmp2, Address(buf, 24)); 4050 crc32x(crc, crc, tmp1); 4051 ldr(tmp3, Address(buf, 32)); 4052 crc32x(crc, crc, tmp2); 4053 ldr(tmp0, Address(buf, 40)); 4054 crc32x(crc, crc, tmp3); 4055 ldr(tmp1, Address(buf, 48)); 4056 crc32x(crc, crc, tmp0); 4057 ldr(tmp2, Address(buf, 56)); 4058 crc32x(crc, crc, tmp1); 4059 ldr(tmp3, Address(pre(buf, 64))); 4060 br(Assembler::GE, CRC_by64_loop); 4061 4062 // post-loop 4063 crc32x(crc, crc, tmp2); 4064 crc32x(crc, crc, tmp3); 4065 4066 sub(len, len, 64); 4067 add(buf, buf, 8); 4068 cmn(len, (u1)128); 4069 br(Assembler::NE, CRC_less64); 4070 BIND(L_exit); 4071 mvnw(crc, crc); 4072 } 4073 4074 /** 4075 * @param crc register containing existing CRC (32-bit) 4076 * @param buf register pointing to input byte buffer (byte*) 4077 * @param len register containing number of bytes 4078 * @param table register that will contain address of CRC table 4079 * @param tmp scratch register 4080 */ 4081 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4082 Register table0, Register table1, Register table2, Register table3, 4083 Register tmp, Register tmp2, Register tmp3) { 4084 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4085 4086 if (UseCryptoPmullForCRC32) { 4087 
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4088 return; 4089 } 4090 4091 if (UseCRC32) { 4092 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4093 return; 4094 } 4095 4096 mvnw(crc, crc); 4097 4098 { 4099 uint64_t offset; 4100 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4101 add(table0, table0, offset); 4102 } 4103 add(table1, table0, 1*256*sizeof(juint)); 4104 add(table2, table0, 2*256*sizeof(juint)); 4105 add(table3, table0, 3*256*sizeof(juint)); 4106 4107 if (UseNeon) { 4108 cmp(len, (u1)64); 4109 br(Assembler::LT, L_by16); 4110 eor(v16, T16B, v16, v16); 4111 4112 Label L_fold; 4113 4114 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4115 4116 ld1(v0, v1, T2D, post(buf, 32)); 4117 ld1r(v4, T2D, post(tmp, 8)); 4118 ld1r(v5, T2D, post(tmp, 8)); 4119 ld1r(v6, T2D, post(tmp, 8)); 4120 ld1r(v7, T2D, post(tmp, 8)); 4121 mov(v16, S, 0, crc); 4122 4123 eor(v0, T16B, v0, v16); 4124 sub(len, len, 64); 4125 4126 BIND(L_fold); 4127 pmull(v22, T8H, v0, v5, T8B); 4128 pmull(v20, T8H, v0, v7, T8B); 4129 pmull(v23, T8H, v0, v4, T8B); 4130 pmull(v21, T8H, v0, v6, T8B); 4131 4132 pmull2(v18, T8H, v0, v5, T16B); 4133 pmull2(v16, T8H, v0, v7, T16B); 4134 pmull2(v19, T8H, v0, v4, T16B); 4135 pmull2(v17, T8H, v0, v6, T16B); 4136 4137 uzp1(v24, T8H, v20, v22); 4138 uzp2(v25, T8H, v20, v22); 4139 eor(v20, T16B, v24, v25); 4140 4141 uzp1(v26, T8H, v16, v18); 4142 uzp2(v27, T8H, v16, v18); 4143 eor(v16, T16B, v26, v27); 4144 4145 ushll2(v22, T4S, v20, T8H, 8); 4146 ushll(v20, T4S, v20, T4H, 8); 4147 4148 ushll2(v18, T4S, v16, T8H, 8); 4149 ushll(v16, T4S, v16, T4H, 8); 4150 4151 eor(v22, T16B, v23, v22); 4152 eor(v18, T16B, v19, v18); 4153 eor(v20, T16B, v21, v20); 4154 eor(v16, T16B, v17, v16); 4155 4156 uzp1(v17, T2D, v16, v20); 4157 uzp2(v21, T2D, v16, v20); 4158 eor(v17, T16B, v17, v21); 4159 4160 ushll2(v20, T2D, v17, T4S, 16); 4161 ushll(v16, T2D, v17, T2S, 16); 4162 4163 eor(v20, T16B, v20, v22); 4164 eor(v16, T16B, v16, v18); 4165 4166 uzp1(v17, T2D, v20, v16); 4167 uzp2(v21, T2D, v20, v16); 4168 eor(v28, T16B, v17, v21); 4169 4170 pmull(v22, T8H, v1, v5, T8B); 4171 pmull(v20, T8H, v1, v7, T8B); 4172 pmull(v23, T8H, v1, v4, T8B); 4173 pmull(v21, T8H, v1, v6, T8B); 4174 4175 pmull2(v18, T8H, v1, v5, T16B); 4176 pmull2(v16, T8H, v1, v7, T16B); 4177 pmull2(v19, T8H, v1, v4, T16B); 4178 pmull2(v17, T8H, v1, v6, T16B); 4179 4180 ld1(v0, v1, T2D, post(buf, 32)); 4181 4182 uzp1(v24, T8H, v20, v22); 4183 uzp2(v25, T8H, v20, v22); 4184 eor(v20, T16B, v24, v25); 4185 4186 uzp1(v26, T8H, v16, v18); 4187 uzp2(v27, T8H, v16, v18); 4188 eor(v16, T16B, v26, v27); 4189 4190 ushll2(v22, T4S, v20, T8H, 8); 4191 ushll(v20, T4S, v20, T4H, 8); 4192 4193 ushll2(v18, T4S, v16, T8H, 8); 4194 ushll(v16, T4S, v16, T4H, 8); 4195 4196 eor(v22, T16B, v23, v22); 4197 eor(v18, T16B, v19, v18); 4198 eor(v20, T16B, v21, v20); 4199 eor(v16, T16B, v17, v16); 4200 4201 uzp1(v17, T2D, v16, v20); 4202 uzp2(v21, T2D, v16, v20); 4203 eor(v16, T16B, v17, v21); 4204 4205 ushll2(v20, T2D, v16, T4S, 16); 4206 ushll(v16, T2D, v16, T2S, 16); 4207 4208 eor(v20, T16B, v22, v20); 4209 eor(v16, T16B, v16, v18); 4210 4211 uzp1(v17, T2D, v20, v16); 4212 uzp2(v21, T2D, v20, v16); 4213 eor(v20, T16B, v17, v21); 4214 4215 shl(v16, T2D, v28, 1); 4216 shl(v17, T2D, v20, 1); 4217 4218 eor(v0, T16B, v0, v16); 4219 eor(v1, T16B, v1, v17); 4220 4221 subs(len, len, 32); 4222 br(Assembler::GE, L_fold); 4223 4224 mov(crc, 0); 4225 mov(tmp, v0, D, 0); 4226 
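// Reduce the folded vector state in v0/v1 to a scalar CRC, one 32-bit
// word at a time (low half then high half of each 64-bit lane).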
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4227 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4228 mov(tmp, v0, D, 1); 4229 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4230 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4231 mov(tmp, v1, D, 0); 4232 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4233 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4234 mov(tmp, v1, D, 1); 4235 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4236 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4237 4238 add(len, len, 32); 4239 } 4240 4241 BIND(L_by16); 4242 subs(len, len, 16); 4243 br(Assembler::GE, L_by16_loop); 4244 adds(len, len, 16-4); 4245 br(Assembler::GE, L_by4_loop); 4246 adds(len, len, 4); 4247 br(Assembler::GT, L_by1_loop); 4248 b(L_exit); 4249 4250 BIND(L_by4_loop); 4251 ldrw(tmp, Address(post(buf, 4))); 4252 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4253 subs(len, len, 4); 4254 br(Assembler::GE, L_by4_loop); 4255 adds(len, len, 4); 4256 br(Assembler::LE, L_exit); 4257 BIND(L_by1_loop); 4258 subs(len, len, 1); 4259 ldrb(tmp, Address(post(buf, 1))); 4260 update_byte_crc32(crc, tmp, table0); 4261 br(Assembler::GT, L_by1_loop); 4262 b(L_exit); 4263 4264 align(CodeEntryAlignment); 4265 BIND(L_by16_loop); 4266 subs(len, len, 16); 4267 ldp(tmp, tmp3, Address(post(buf, 16))); 4268 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4269 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4270 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4271 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4272 br(Assembler::GE, L_by16_loop); 4273 adds(len, len, 16-4); 4274 br(Assembler::GE, L_by4_loop); 4275 adds(len, len, 4); 4276 br(Assembler::GT, L_by1_loop); 4277 BIND(L_exit); 4278 mvnw(crc, crc); 4279 } 4280 4281 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4282 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4283 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4284 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4285 4286 subs(tmp0, len, 384); 4287 br(Assembler::GE, CRC_by128_pre); 4288 BIND(CRC_less128); 4289 subs(len, len, 32); 4290 br(Assembler::GE, CRC_by32_loop); 4291 BIND(CRC_less32); 4292 adds(len, len, 32 - 4); 4293 br(Assembler::GE, CRC_by4_loop); 4294 adds(len, len, 4); 4295 br(Assembler::GT, CRC_by1_loop); 4296 b(L_exit); 4297 4298 BIND(CRC_by32_loop); 4299 ldp(tmp0, tmp1, Address(buf)); 4300 crc32cx(crc, crc, tmp0); 4301 ldr(tmp2, Address(buf, 16)); 4302 crc32cx(crc, crc, tmp1); 4303 ldr(tmp3, Address(buf, 24)); 4304 crc32cx(crc, crc, tmp2); 4305 add(buf, buf, 32); 4306 subs(len, len, 32); 4307 crc32cx(crc, crc, tmp3); 4308 br(Assembler::GE, CRC_by32_loop); 4309 cmn(len, (u1)32); 4310 br(Assembler::NE, CRC_less32); 4311 b(L_exit); 4312 4313 BIND(CRC_by4_loop); 4314 ldrw(tmp0, Address(post(buf, 4))); 4315 subs(len, len, 4); 4316 crc32cw(crc, crc, tmp0); 4317 br(Assembler::GE, CRC_by4_loop); 4318 adds(len, len, 4); 4319 br(Assembler::LE, L_exit); 4320 BIND(CRC_by1_loop); 4321 ldrb(tmp0, Address(post(buf, 1))); 4322 subs(len, len, 1); 4323 crc32cb(crc, crc, tmp0); 4324 br(Assembler::GT, CRC_by1_loop); 4325 b(L_exit); 4326 4327 
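// At least 384 bytes of input remain: fold 128 bytes per iteration with
// pmull, then fold the last two 64-bit words back in with crc32cx.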
BIND(CRC_by128_pre); 4328 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4329 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4330 mov(crc, 0); 4331 crc32cx(crc, crc, tmp0); 4332 crc32cx(crc, crc, tmp1); 4333 4334 cbnz(len, CRC_less128); 4335 4336 BIND(L_exit); 4337 } 4338 4339 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4340 Register len, Register tmp0, Register tmp1, Register tmp2, 4341 Register tmp3) { 4342 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4343 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4344 4345 subs(len, len, 128); 4346 br(Assembler::GE, CRC_by64_pre); 4347 BIND(CRC_less64); 4348 adds(len, len, 128-32); 4349 br(Assembler::GE, CRC_by32_loop); 4350 BIND(CRC_less32); 4351 adds(len, len, 32-4); 4352 br(Assembler::GE, CRC_by4_loop); 4353 adds(len, len, 4); 4354 br(Assembler::GT, CRC_by1_loop); 4355 b(L_exit); 4356 4357 BIND(CRC_by32_loop); 4358 ldp(tmp0, tmp1, Address(post(buf, 16))); 4359 subs(len, len, 32); 4360 crc32cx(crc, crc, tmp0); 4361 ldr(tmp2, Address(post(buf, 8))); 4362 crc32cx(crc, crc, tmp1); 4363 ldr(tmp3, Address(post(buf, 8))); 4364 crc32cx(crc, crc, tmp2); 4365 crc32cx(crc, crc, tmp3); 4366 br(Assembler::GE, CRC_by32_loop); 4367 cmn(len, (u1)32); 4368 br(Assembler::NE, CRC_less32); 4369 b(L_exit); 4370 4371 BIND(CRC_by4_loop); 4372 ldrw(tmp0, Address(post(buf, 4))); 4373 subs(len, len, 4); 4374 crc32cw(crc, crc, tmp0); 4375 br(Assembler::GE, CRC_by4_loop); 4376 adds(len, len, 4); 4377 br(Assembler::LE, L_exit); 4378 BIND(CRC_by1_loop); 4379 ldrb(tmp0, Address(post(buf, 1))); 4380 subs(len, len, 1); 4381 crc32cb(crc, crc, tmp0); 4382 br(Assembler::GT, CRC_by1_loop); 4383 b(L_exit); 4384 4385 BIND(CRC_by64_pre); 4386 sub(buf, buf, 8); 4387 ldp(tmp0, tmp1, Address(buf, 8)); 4388 crc32cx(crc, crc, tmp0); 4389 ldr(tmp2, Address(buf, 24)); 4390 crc32cx(crc, crc, tmp1); 4391 ldr(tmp3, Address(buf, 32)); 4392 crc32cx(crc, crc, tmp2); 4393 ldr(tmp0, Address(buf, 40)); 4394 crc32cx(crc, crc, tmp3); 4395 ldr(tmp1, Address(buf, 48)); 4396 crc32cx(crc, crc, tmp0); 4397 ldr(tmp2, Address(buf, 56)); 4398 crc32cx(crc, crc, tmp1); 4399 ldr(tmp3, Address(pre(buf, 64))); 4400 4401 b(CRC_by64_loop); 4402 4403 align(CodeEntryAlignment); 4404 BIND(CRC_by64_loop); 4405 subs(len, len, 64); 4406 crc32cx(crc, crc, tmp2); 4407 ldr(tmp0, Address(buf, 8)); 4408 crc32cx(crc, crc, tmp3); 4409 ldr(tmp1, Address(buf, 16)); 4410 crc32cx(crc, crc, tmp0); 4411 ldr(tmp2, Address(buf, 24)); 4412 crc32cx(crc, crc, tmp1); 4413 ldr(tmp3, Address(buf, 32)); 4414 crc32cx(crc, crc, tmp2); 4415 ldr(tmp0, Address(buf, 40)); 4416 crc32cx(crc, crc, tmp3); 4417 ldr(tmp1, Address(buf, 48)); 4418 crc32cx(crc, crc, tmp0); 4419 ldr(tmp2, Address(buf, 56)); 4420 crc32cx(crc, crc, tmp1); 4421 ldr(tmp3, Address(pre(buf, 64))); 4422 br(Assembler::GE, CRC_by64_loop); 4423 4424 // post-loop 4425 crc32cx(crc, crc, tmp2); 4426 crc32cx(crc, crc, tmp3); 4427 4428 sub(len, len, 64); 4429 add(buf, buf, 8); 4430 cmn(len, (u1)128); 4431 br(Assembler::NE, CRC_less64); 4432 BIND(L_exit); 4433 } 4434 4435 /** 4436 * @param crc register containing existing CRC (32-bit) 4437 * @param buf register pointing to input byte buffer (byte*) 4438 * @param len register containing number of bytes 4439 * @param table register that will contain address of CRC table 4440 * @param tmp scratch register 4441 */ 4442 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 4443 
Register table0, Register table1, Register table2, Register table3, 4444 Register tmp, Register tmp2, Register tmp3) { 4445 if (UseCryptoPmullForCRC32) { 4446 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4447 } else { 4448 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4449 } 4450 } 4451 4452 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4453 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4454 Label CRC_by128_loop; 4455 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4456 4457 sub(len, len, 256); 4458 Register table = tmp0; 4459 { 4460 uint64_t offset; 4461 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4462 add(table, table, offset); 4463 } 4464 add(table, table, table_offset); 4465 4466 // Registers v0..v7 are used as data registers. 4467 // Registers v16..v31 are used as tmp registers. 4468 sub(buf, buf, 0x10); 4469 ldrq(v0, Address(buf, 0x10)); 4470 ldrq(v1, Address(buf, 0x20)); 4471 ldrq(v2, Address(buf, 0x30)); 4472 ldrq(v3, Address(buf, 0x40)); 4473 ldrq(v4, Address(buf, 0x50)); 4474 ldrq(v5, Address(buf, 0x60)); 4475 ldrq(v6, Address(buf, 0x70)); 4476 ldrq(v7, Address(pre(buf, 0x80))); 4477 4478 movi(v31, T4S, 0); 4479 mov(v31, S, 0, crc); 4480 eor(v0, T16B, v0, v31); 4481 4482 // Register v16 contains constants from the crc table. 4483 ldrq(v16, Address(table)); 4484 b(CRC_by128_loop); 4485 4486 align(OptoLoopAlignment); 4487 BIND(CRC_by128_loop); 4488 pmull (v17, T1Q, v0, v16, T1D); 4489 pmull2(v18, T1Q, v0, v16, T2D); 4490 ldrq(v0, Address(buf, 0x10)); 4491 eor3(v0, T16B, v17, v18, v0); 4492 4493 pmull (v19, T1Q, v1, v16, T1D); 4494 pmull2(v20, T1Q, v1, v16, T2D); 4495 ldrq(v1, Address(buf, 0x20)); 4496 eor3(v1, T16B, v19, v20, v1); 4497 4498 pmull (v21, T1Q, v2, v16, T1D); 4499 pmull2(v22, T1Q, v2, v16, T2D); 4500 ldrq(v2, Address(buf, 0x30)); 4501 eor3(v2, T16B, v21, v22, v2); 4502 4503 pmull (v23, T1Q, v3, v16, T1D); 4504 pmull2(v24, T1Q, v3, v16, T2D); 4505 ldrq(v3, Address(buf, 0x40)); 4506 eor3(v3, T16B, v23, v24, v3); 4507 4508 pmull (v25, T1Q, v4, v16, T1D); 4509 pmull2(v26, T1Q, v4, v16, T2D); 4510 ldrq(v4, Address(buf, 0x50)); 4511 eor3(v4, T16B, v25, v26, v4); 4512 4513 pmull (v27, T1Q, v5, v16, T1D); 4514 pmull2(v28, T1Q, v5, v16, T2D); 4515 ldrq(v5, Address(buf, 0x60)); 4516 eor3(v5, T16B, v27, v28, v5); 4517 4518 pmull (v29, T1Q, v6, v16, T1D); 4519 pmull2(v30, T1Q, v6, v16, T2D); 4520 ldrq(v6, Address(buf, 0x70)); 4521 eor3(v6, T16B, v29, v30, v6); 4522 4523 // Reuse registers v23, v24. 4524 // Using them won't block the first instruction of the next iteration. 4525 pmull (v23, T1Q, v7, v16, T1D); 4526 pmull2(v24, T1Q, v7, v16, T2D); 4527 ldrq(v7, Address(pre(buf, 0x80))); 4528 eor3(v7, T16B, v23, v24, v7); 4529 4530 subs(len, len, 0x80); 4531 br(Assembler::GE, CRC_by128_loop); 4532 4533 // fold into 512 bits 4534 // Use v31 for constants because v16 can be still in use. 
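// The constants at table+0x10 fold the 1024-bit state in v0..v7 down to
// 512 bits: each of v0..v3 is multiplied through and xor-combined with
// the corresponding register among v4..v7.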
4535 ldrq(v31, Address(table, 0x10));
4536
4537 pmull (v17, T1Q, v0, v31, T1D);
4538 pmull2(v18, T1Q, v0, v31, T2D);
4539 eor3(v0, T16B, v17, v18, v4);
4540
4541 pmull (v19, T1Q, v1, v31, T1D);
4542 pmull2(v20, T1Q, v1, v31, T2D);
4543 eor3(v1, T16B, v19, v20, v5);
4544
4545 pmull (v21, T1Q, v2, v31, T1D);
4546 pmull2(v22, T1Q, v2, v31, T2D);
4547 eor3(v2, T16B, v21, v22, v6);
4548
4549 pmull (v23, T1Q, v3, v31, T1D);
4550 pmull2(v24, T1Q, v3, v31, T2D);
4551 eor3(v3, T16B, v23, v24, v7);
4552
4553 // fold into 128 bits
4554 // Use v17 for constants because v31 can still be in use.
4555 ldrq(v17, Address(table, 0x20));
4556 pmull (v25, T1Q, v0, v17, T1D);
4557 pmull2(v26, T1Q, v0, v17, T2D);
4558 eor3(v3, T16B, v3, v25, v26);
4559
4560 // Use v18 for constants because v17 can still be in use.
4561 ldrq(v18, Address(table, 0x30));
4562 pmull (v27, T1Q, v1, v18, T1D);
4563 pmull2(v28, T1Q, v1, v18, T2D);
4564 eor3(v3, T16B, v3, v27, v28);
4565
4566 // Use v19 for constants because v18 can still be in use.
4567 ldrq(v19, Address(table, 0x40));
4568 pmull (v29, T1Q, v2, v19, T1D);
4569 pmull2(v30, T1Q, v2, v19, T2D);
4570 eor3(v0, T16B, v3, v29, v30);
4571
4572 add(len, len, 0x80);
4573 add(buf, buf, 0x10);
4574
4575 mov(tmp0, v0, D, 0);
4576 mov(tmp1, v0, D, 1);
4577 }
4578
4579 SkipIfEqual::SkipIfEqual(
4580 MacroAssembler* masm, const bool* flag_addr, bool value) {
4581 _masm = masm;
4582 uint64_t offset;
4583 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
4584 _masm->ldrb(rscratch1, Address(rscratch1, offset));
4585 if (value) {
4586 _masm->cbnzw(rscratch1, _label);
4587 } else {
4588 _masm->cbzw(rscratch1, _label);
4589 }
4590 }
4591
4592 SkipIfEqual::~SkipIfEqual() {
4593 _masm->bind(_label);
4594 }
4595
4596 void MacroAssembler::addptr(const Address &dst, int32_t src) {
4597 Address adr;
4598 switch(dst.getMode()) {
4599 case Address::base_plus_offset:
4600 // This is the expected mode, although we allow all the other
4601 // forms below.
4602 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4603 break; 4604 default: 4605 lea(rscratch2, dst); 4606 adr = Address(rscratch2); 4607 break; 4608 } 4609 ldr(rscratch1, adr); 4610 add(rscratch1, rscratch1, src); 4611 str(rscratch1, adr); 4612 } 4613 4614 void MacroAssembler::cmpptr(Register src1, Address src2) { 4615 uint64_t offset; 4616 adrp(rscratch1, src2, offset); 4617 ldr(rscratch1, Address(rscratch1, offset)); 4618 cmp(src1, rscratch1); 4619 } 4620 4621 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4622 cmp(obj1, obj2); 4623 } 4624 4625 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4626 load_method_holder(rresult, rmethod); 4627 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4628 } 4629 4630 void MacroAssembler::load_method_holder(Register holder, Register method) { 4631 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4632 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4633 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4634 } 4635 4636 void MacroAssembler::load_metadata(Register dst, Register src) { 4637 if (UseCompressedClassPointers) { 4638 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4639 } else { 4640 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4641 } 4642 } 4643 4644 void MacroAssembler::load_klass(Register dst, Register src) { 4645 if (UseCompressedClassPointers) { 4646 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4647 decode_klass_not_null(dst); 4648 } else { 4649 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 4650 } 4651 } 4652 4653 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 4654 if (RestoreMXCSROnJNICalls) { 4655 Label OK; 4656 get_fpcr(tmp1); 4657 mov(tmp2, tmp1); 4658 // Set FPCR to the state we need. We do want Round to Nearest. We 4659 // don't want non-IEEE rounding modes or floating-point traps. 4660 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 4661 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 4662 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 4663 eor(tmp2, tmp1, tmp2); 4664 cbz(tmp2, OK); // Only reset FPCR if it's wrong 4665 set_fpcr(tmp1); 4666 bind(OK); 4667 } 4668 } 4669 4670 // ((OopHandle)result).resolve(); 4671 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 4672 // OopHandle::resolve is an indirection. 4673 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 4674 } 4675 4676 // ((WeakHandle)result).resolve(); 4677 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 4678 assert_different_registers(result, tmp1, tmp2); 4679 Label resolved; 4680 4681 // A null weak handle resolves to null. 4682 cbz(result, resolved); 4683 4684 // Only 64 bit platforms support GCs that require a tmp register 4685 // WeakHandle::resolve is an indirection like jweak. 
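// ON_PHANTOM_OOP_REF routes this load through the GC barrier so that a
// referent the collector may concurrently clear is handled correctly.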
access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
4687 result, Address(result), tmp1, tmp2);
4688 bind(resolved);
4689 }
4690
4691 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
4692 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
4693 ldr(dst, Address(method, Method::const_offset()));
4694 ldr(dst, Address(dst, ConstMethod::constants_offset()));
4695 ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
4696 ldr(dst, Address(dst, mirror_offset));
4697 resolve_oop_handle(dst, tmp1, tmp2);
4698 }
4699
4700 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
4701 if (UseCompressedClassPointers) {
4702 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4703 if (CompressedKlassPointers::base() == nullptr) {
4704 cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
4705 return;
4706 } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
4707 && CompressedKlassPointers::shift() == 0) {
4708 // Only the bottom 32 bits matter
4709 cmpw(trial_klass, tmp);
4710 return;
4711 }
4712 decode_klass_not_null(tmp);
4713 } else {
4714 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
4715 }
4716 cmp(trial_klass, tmp);
4717 }
4718
4719 void MacroAssembler::load_prototype_header(Register dst, Register src) {
4720 load_klass(dst, src);
4721 ldr(dst, Address(dst, Klass::prototype_header_offset()));
4722 }
4723
4724 void MacroAssembler::store_klass(Register dst, Register src) {
4725 // FIXME: Should this be a store release? Concurrent GCs assume the
4726 // klass length is valid if the klass field is not null.
4727 if (UseCompressedClassPointers) {
4728 encode_klass_not_null(src);
4729 strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4730 } else {
4731 str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
4732 }
4733 }
4734
4735 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4736 if (UseCompressedClassPointers) {
4737 // Store to klass gap in destination
4738 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
4739 }
4740 }
4741
4742 // Algorithm must match CompressedOops::encode.
4743 void MacroAssembler::encode_heap_oop(Register d, Register s) {
4744 #ifdef ASSERT
4745 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4746 #endif
4747 verify_oop_msg(s, "broken oop in encode_heap_oop");
4748 if (CompressedOops::base() == nullptr) {
4749 if (CompressedOops::shift() != 0) {
4750 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4751 lsr(d, s, LogMinObjAlignmentInBytes);
4752 } else {
4753 mov(d, s);
4754 }
4755 } else {
4756 subs(d, s, rheapbase);
4757 csel(d, d, zr, Assembler::HS);
4758 lsr(d, d, LogMinObjAlignmentInBytes);
4759
4760 /* Old algorithm: is this any worse?
4761 Label nonnull; 4762 cbnz(r, nonnull); 4763 sub(r, r, rheapbase); 4764 bind(nonnull); 4765 lsr(r, r, LogMinObjAlignmentInBytes); 4766 */ 4767 } 4768 } 4769 4770 void MacroAssembler::encode_heap_oop_not_null(Register r) { 4771 #ifdef ASSERT 4772 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 4773 if (CheckCompressedOops) { 4774 Label ok; 4775 cbnz(r, ok); 4776 stop("null oop passed to encode_heap_oop_not_null"); 4777 bind(ok); 4778 } 4779 #endif 4780 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 4781 if (CompressedOops::base() != nullptr) { 4782 sub(r, r, rheapbase); 4783 } 4784 if (CompressedOops::shift() != 0) { 4785 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4786 lsr(r, r, LogMinObjAlignmentInBytes); 4787 } 4788 } 4789 4790 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 4791 #ifdef ASSERT 4792 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 4793 if (CheckCompressedOops) { 4794 Label ok; 4795 cbnz(src, ok); 4796 stop("null oop passed to encode_heap_oop_not_null2"); 4797 bind(ok); 4798 } 4799 #endif 4800 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 4801 4802 Register data = src; 4803 if (CompressedOops::base() != nullptr) { 4804 sub(dst, src, rheapbase); 4805 data = dst; 4806 } 4807 if (CompressedOops::shift() != 0) { 4808 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4809 lsr(dst, data, LogMinObjAlignmentInBytes); 4810 data = dst; 4811 } 4812 if (data == src) 4813 mov(dst, src); 4814 } 4815 4816 void MacroAssembler::decode_heap_oop(Register d, Register s) { 4817 #ifdef ASSERT 4818 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 4819 #endif 4820 if (CompressedOops::base() == nullptr) { 4821 if (CompressedOops::shift() != 0 || d != s) { 4822 lsl(d, s, CompressedOops::shift()); 4823 } 4824 } else { 4825 Label done; 4826 if (d != s) 4827 mov(d, s); 4828 cbz(s, done); 4829 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 4830 bind(done); 4831 } 4832 verify_oop_msg(d, "broken oop in decode_heap_oop"); 4833 } 4834 4835 void MacroAssembler::decode_heap_oop_not_null(Register r) { 4836 assert (UseCompressedOops, "should only be used for compressed headers"); 4837 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4838 // Cannot assert, unverified entry point counts instructions (see .ad file) 4839 // vtableStubs also counts instructions in pd_code_size_limit. 4840 // Also do not verify_oop as this is called by verify_oop. 4841 if (CompressedOops::shift() != 0) { 4842 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4843 if (CompressedOops::base() != nullptr) { 4844 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 4845 } else { 4846 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 4847 } 4848 } else { 4849 assert (CompressedOops::base() == nullptr, "sanity"); 4850 } 4851 } 4852 4853 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 4854 assert (UseCompressedOops, "should only be used for compressed headers"); 4855 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4856 // Cannot assert, unverified entry point counts instructions (see .ad file) 4857 // vtableStubs also counts instructions in pd_code_size_limit. 4858 // Also do not verify_oop as this is called by verify_oop. 
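// Decoding computes dst = (src << shift) + base. Illustrative numbers
// (assumed, not fixed by this code): with base 0x800000000 and shift 3,
// the narrow oop 0x10 decodes to 0x800000080.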
4859 if (CompressedOops::shift() != 0) { 4860 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 4861 if (CompressedOops::base() != nullptr) { 4862 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 4863 } else { 4864 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 4865 } 4866 } else { 4867 assert (CompressedOops::base() == nullptr, "sanity"); 4868 if (dst != src) { 4869 mov(dst, src); 4870 } 4871 } 4872 } 4873 4874 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 4875 4876 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 4877 assert(UseCompressedClassPointers, "not using compressed class pointers"); 4878 assert(Metaspace::initialized(), "metaspace not initialized yet"); 4879 4880 if (_klass_decode_mode != KlassDecodeNone) { 4881 return _klass_decode_mode; 4882 } 4883 4884 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 4885 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 4886 4887 if (CompressedKlassPointers::base() == nullptr) { 4888 return (_klass_decode_mode = KlassDecodeZero); 4889 } 4890 4891 if (operand_valid_for_logical_immediate( 4892 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 4893 const uint64_t range_mask = 4894 (1ULL << log2i(CompressedKlassPointers::range())) - 1; 4895 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 4896 return (_klass_decode_mode = KlassDecodeXor); 4897 } 4898 } 4899 4900 const uint64_t shifted_base = 4901 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 4902 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 4903 "compressed class base bad alignment"); 4904 4905 return (_klass_decode_mode = KlassDecodeMovk); 4906 } 4907 4908 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 4909 switch (klass_decode_mode()) { 4910 case KlassDecodeZero: 4911 if (CompressedKlassPointers::shift() != 0) { 4912 lsr(dst, src, LogKlassAlignmentInBytes); 4913 } else { 4914 if (dst != src) mov(dst, src); 4915 } 4916 break; 4917 4918 case KlassDecodeXor: 4919 if (CompressedKlassPointers::shift() != 0) { 4920 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4921 lsr(dst, dst, LogKlassAlignmentInBytes); 4922 } else { 4923 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4924 } 4925 break; 4926 4927 case KlassDecodeMovk: 4928 if (CompressedKlassPointers::shift() != 0) { 4929 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 4930 } else { 4931 movw(dst, src); 4932 } 4933 break; 4934 4935 case KlassDecodeNone: 4936 ShouldNotReachHere(); 4937 break; 4938 } 4939 } 4940 4941 void MacroAssembler::encode_klass_not_null(Register r) { 4942 encode_klass_not_null(r, r); 4943 } 4944 4945 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 4946 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 4947 4948 switch (klass_decode_mode()) { 4949 case KlassDecodeZero: 4950 if (CompressedKlassPointers::shift() != 0) { 4951 lsl(dst, src, LogKlassAlignmentInBytes); 4952 } else { 4953 if (dst != src) mov(dst, src); 4954 } 4955 break; 4956 4957 case KlassDecodeXor: 4958 if (CompressedKlassPointers::shift() != 0) { 4959 lsl(dst, src, LogKlassAlignmentInBytes); 4960 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 4961 } else { 4962 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 4963 } 4964 break; 4965 4966 case KlassDecodeMovk: { 4967 const uint64_t shifted_base = 4968 
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 4969 4970 if (dst != src) movw(dst, src); 4971 movk(dst, shifted_base >> 32, 32); 4972 4973 if (CompressedKlassPointers::shift() != 0) { 4974 lsl(dst, dst, LogKlassAlignmentInBytes); 4975 } 4976 4977 break; 4978 } 4979 4980 case KlassDecodeNone: 4981 ShouldNotReachHere(); 4982 break; 4983 } 4984 } 4985 4986 void MacroAssembler::decode_klass_not_null(Register r) { 4987 decode_klass_not_null(r, r); 4988 } 4989 4990 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 4991 #ifdef ASSERT 4992 { 4993 ThreadInVMfromUnknown tiv; 4994 assert (UseCompressedOops, "should only be used for compressed oops"); 4995 assert (Universe::heap() != nullptr, "java heap should be initialized"); 4996 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 4997 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 4998 } 4999 #endif 5000 int oop_index = oop_recorder()->find_index(obj); 5001 InstructionMark im(this); 5002 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5003 code_section()->relocate(inst_mark(), rspec); 5004 movz(dst, 0xDEAD, 16); 5005 movk(dst, 0xBEEF); 5006 } 5007 5008 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5009 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5010 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5011 int index = oop_recorder()->find_index(k); 5012 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5013 5014 InstructionMark im(this); 5015 RelocationHolder rspec = metadata_Relocation::spec(index); 5016 code_section()->relocate(inst_mark(), rspec); 5017 narrowKlass nk = CompressedKlassPointers::encode(k); 5018 movz(dst, (nk >> 16), 16); 5019 movk(dst, nk & 0xffff); 5020 } 5021 5022 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5023 Register dst, Address src, 5024 Register tmp1, Register tmp2) { 5025 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5026 decorators = AccessInternal::decorator_fixup(decorators, type); 5027 bool as_raw = (decorators & AS_RAW) != 0; 5028 if (as_raw) { 5029 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5030 } else { 5031 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5032 } 5033 } 5034 5035 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5036 Address dst, Register val, 5037 Register tmp1, Register tmp2, Register tmp3) { 5038 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5039 decorators = AccessInternal::decorator_fixup(decorators, type); 5040 bool as_raw = (decorators & AS_RAW) != 0; 5041 if (as_raw) { 5042 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5043 } else { 5044 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5045 } 5046 } 5047 5048 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, 5049 Register inline_klass) { 5050 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5051 bs->value_copy(this, decorators, src, dst, inline_klass); 5052 } 5053 5054 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 5055 ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 5056 ldrw(offset, Address(offset, InlineKlass::first_field_offset_offset())); 5057 } 5058 5059 void 
MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 5060 // ((address) (void*) o) + vk->first_field_offset(); 5061 Register offset = (data == oop) ? rscratch1 : data; 5062 first_field_offset(inline_klass, offset); 5063 if (data == oop) { 5064 add(data, data, offset); 5065 } else { 5066 lea(data, Address(oop, offset)); 5067 } 5068 } 5069 5070 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 5071 Register index, Register data) { 5072 assert_different_registers(array, array_klass, index); 5073 assert_different_registers(rscratch1, array, index); 5074 5075 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 5076 ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset())); 5077 5078 // Klass::layout_helper_log2_element_size(lh) 5079 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 5080 lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift); 5081 andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask); 5082 lslv(index, index, rscratch1); 5083 5084 add(data, array, index); 5085 add(data, data, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)); 5086 } 5087 5088 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5089 Register tmp2, DecoratorSet decorators) { 5090 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5091 } 5092 5093 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5094 Register tmp2, DecoratorSet decorators) { 5095 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5096 } 5097 5098 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5099 Register tmp2, Register tmp3, DecoratorSet decorators) { 5100 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5101 } 5102 5103 // Used for storing nulls. 5104 void MacroAssembler::store_heap_oop_null(Address dst) { 5105 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5106 } 5107 5108 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5109 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5110 int index = oop_recorder()->allocate_metadata_index(obj); 5111 RelocationHolder rspec = metadata_Relocation::spec(index); 5112 return Address((address)obj, rspec); 5113 } 5114 5115 // Move an oop into a register. 5116 void MacroAssembler::movoop(Register dst, jobject obj) { 5117 int oop_index; 5118 if (obj == nullptr) { 5119 oop_index = oop_recorder()->allocate_oop_index(obj); 5120 } else { 5121 #ifdef ASSERT 5122 { 5123 ThreadInVMfromUnknown tiv; 5124 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5125 } 5126 #endif 5127 oop_index = oop_recorder()->find_index(obj); 5128 } 5129 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5130 5131 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5132 mov(dst, Address((address)obj, rspec)); 5133 } else { 5134 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5135 ldr_constant(dst, Address(dummy, rspec)); 5136 } 5137 5138 } 5139 5140 // Move a metadata address into a register. 
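// A typical use is materialising a Klass* with a metadata relocation so
// the constant can be found and updated if the code is relocated, e.g.
// (as in verified_entry() further down):
//   mov_metadata(rscratch2, C->method()->holder()->constant_encoding());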
5141 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5142 int oop_index; 5143 if (obj == nullptr) { 5144 oop_index = oop_recorder()->allocate_metadata_index(obj); 5145 } else { 5146 oop_index = oop_recorder()->find_index(obj); 5147 } 5148 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5149 mov(dst, Address((address)obj, rspec)); 5150 } 5151 5152 Address MacroAssembler::constant_oop_address(jobject obj) { 5153 #ifdef ASSERT 5154 { 5155 ThreadInVMfromUnknown tiv; 5156 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5157 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5158 } 5159 #endif 5160 int oop_index = oop_recorder()->find_index(obj); 5161 return Address((address)obj, oop_Relocation::spec(oop_index)); 5162 } 5163 5164 // Object / value buffer allocation... 5165 void MacroAssembler::allocate_instance(Register klass, Register new_obj, 5166 Register t1, Register t2, 5167 bool clear_fields, Label& alloc_failed) 5168 { 5169 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop; 5170 Register layout_size = t1; 5171 assert(new_obj == r0, "needs to be r0"); 5172 assert_different_registers(klass, new_obj, t1, t2); 5173 5174 // get instance_size in InstanceKlass (scaled to a count of bytes) 5175 ldrw(layout_size, Address(klass, Klass::layout_helper_offset())); 5176 // test to see if it has a finalizer or is malformed in some way 5177 tst(layout_size, Klass::_lh_instance_slow_path_bit); 5178 br(Assembler::NE, slow_case_no_pop); 5179 5180 // Allocate the instance: 5181 // If TLAB is enabled: 5182 // Try to allocate in the TLAB. 5183 // If fails, go to the slow path. 5184 // Initialize the allocation. 5185 // Exit. 5186 // 5187 // Go to slow path. 5188 5189 if (UseTLAB) { 5190 push(klass); 5191 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case); 5192 if (ZeroTLAB || (!clear_fields)) { 5193 // the fields have been already cleared 5194 b(initialize_header); 5195 } else { 5196 // initialize both the header and fields 5197 b(initialize_object); 5198 } 5199 5200 if (clear_fields) { 5201 // The object is initialized before the header. If the object size is 5202 // zero, go directly to the header initialization. 5203 bind(initialize_object); 5204 subs(layout_size, layout_size, sizeof(oopDesc)); 5205 br(Assembler::EQ, initialize_header); 5206 5207 // Initialize topmost object field, divide size by 8, check if odd and 5208 // test if zero. 5209 5210 #ifdef ASSERT 5211 // make sure instance_size was multiple of 8 5212 Label L; 5213 tst(layout_size, 7); 5214 br(Assembler::EQ, L); 5215 stop("object size is not multiple of 8 - adjust this code"); 5216 bind(L); 5217 // must be > 0, no extra check needed here 5218 #endif 5219 5220 lsr(layout_size, layout_size, LogBytesPerLong); 5221 5222 // initialize remaining object fields: instance_size was a multiple of 8 5223 { 5224 Label loop; 5225 Register base = t2; 5226 5227 bind(loop); 5228 add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong); 5229 str(zr, Address(rscratch1, sizeof(oopDesc) - 1*oopSize)); 5230 subs(layout_size, layout_size, 1); 5231 br(Assembler::NE, loop); 5232 } 5233 } // clear_fields 5234 5235 // initialize object header only. 
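    // What follows writes the object header. With compressed class
    // pointers the layout produced is, in sketch form (exact offsets
    // depend on the build):
    //
    //   [ mark word (from Klass::prototype_header) ]
    //   [ narrow klass | klass gap, zeroed         ]
    //
    // The klass gap is explicitly zeroed so the header is fully initialized.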
5236 bind(initialize_header); 5237 pop(klass); 5238 Register mark_word = t2; 5239 ldr(mark_word, Address(klass, Klass::prototype_header_offset())); 5240 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes ())); 5241 store_klass_gap(new_obj, zr); // zero klass gap for compressed oops 5242 mov(t2, klass); // preserve klass 5243 store_klass(new_obj, t2); // src klass reg is potentially compressed 5244 5245 // TODO: Valhalla removed SharedRuntime::dtrace_object_alloc from here ? 5246 5247 b(done); 5248 } 5249 5250 if (UseTLAB) { 5251 bind(slow_case); 5252 pop(klass); 5253 } 5254 bind(slow_case_no_pop); 5255 b(alloc_failed); 5256 5257 bind(done); 5258 } 5259 5260 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 5261 void MacroAssembler::tlab_allocate(Register obj, 5262 Register var_size_in_bytes, 5263 int con_size_in_bytes, 5264 Register t1, 5265 Register t2, 5266 Label& slow_case) { 5267 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5268 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5269 } 5270 5271 void MacroAssembler::verify_tlab() { 5272 #ifdef ASSERT 5273 if (UseTLAB && VerifyOops) { 5274 Label next, ok; 5275 5276 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5277 5278 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5279 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5280 cmp(rscratch2, rscratch1); 5281 br(Assembler::HS, next); 5282 STOP("assert(top >= start)"); 5283 should_not_reach_here(); 5284 5285 bind(next); 5286 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5287 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5288 cmp(rscratch2, rscratch1); 5289 br(Assembler::HS, ok); 5290 STOP("assert(top <= end)"); 5291 should_not_reach_here(); 5292 5293 bind(ok); 5294 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5295 } 5296 #endif 5297 } 5298 5299 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) { 5300 ldr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset())); 5301 #ifdef ASSERT 5302 { 5303 Label done; 5304 cbnz(inline_klass, done); 5305 stop("get_inline_type_field_klass contains no inline klass"); 5306 bind(done); 5307 } 5308 #endif 5309 lea(inline_klass, Address(inline_klass, Array<InlineKlass*>::base_offset_in_bytes())); 5310 ldr(inline_klass, Address(inline_klass, index, Address::lsl(3))); 5311 } 5312 5313 // Writes to stack successive pages until offset reached to check for 5314 // stack overflow + shadow pages. This clobbers tmp. 5315 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 5316 assert_different_registers(tmp, size, rscratch1); 5317 mov(tmp, sp); 5318 // Bang stack for total size given plus shadow page size. 5319 // Bang one page at a time because large size can bang beyond yellow and 5320 // red zones. 5321 Label loop; 5322 mov(rscratch1, (int)os::vm_page_size()); 5323 bind(loop); 5324 lea(tmp, Address(tmp, -(int)os::vm_page_size())); 5325 subsw(size, size, rscratch1); 5326 str(size, Address(tmp)); 5327 br(Assembler::GT, loop); 5328 5329 // Bang down shadow pages too. 5330 // At this point, (tmp-0) is the last address touched, so don't 5331 // touch it again. (It was touched as (tmp-pagesize) but then tmp 5332 // was post-decremented.) Skip this address by starting at i=1, and 5333 // touch a few more pages below. N.B. 
  // It is important to touch all the way down to and including
  // i=StackShadowPages.
  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
    // this could be any sized move but, since it can serve as a debugging
    // crumb, the bigger the better.
    lea(tmp, Address(tmp, -(int)os::vm_page_size()));
    str(size, Address(tmp));
  }
}

// Move the address of the polling page into dest.
void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
  ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
}

// Read the polling page. The address of the polling page must
// already be in r.
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  address mark;
  {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), rtype);
    ldrw(zr, Address(r, 0));
    mark = inst_mark();
  }
  verify_cross_modify_fence_not_required();
  return mark;
}

void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
  relocInfo::relocType rtype = dest.rspec().reloc()->type();
  uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
  uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
  uint64_t dest_page = (uint64_t)dest.target() >> 12;
  int64_t offset_low = dest_page - low_page;
  int64_t offset_high = dest_page - high_page;

  assert(is_valid_AArch64_address(dest.target()), "bad address");
  assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");

  InstructionMark im(this);
  code_section()->relocate(inst_mark(), dest.rspec());
  // 8143067: Ensure that the adrp can reach the dest from anywhere within
  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it
  // might even be negative. It is thus materialised as a constant.
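  // For reference, a card table post-barrier computes the card for a store
  // address roughly like this (a sketch only, assuming the conventional
  // 512-byte card size, i.e. a card shift of 9):
  //
  //   volatile jbyte* card = byte_map_base + (store_addr >> 9);
  //   *card = dirty;
  //
  // byte_map_base is pre-biased by the heap's lowest address, which is why
  // it need not itself point at valid memory.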
  mov(reg, (uint64_t)byte_map_base);
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
  authenticate_return_address();
}

void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
  if (needs_stack_repair) {
    // Remove the extension of the caller's frame used for inline type unpacking.
    //
    // Right now the stack looks like this:
    //
    //   | Arguments from caller      |
    //   |----------------------------|  <-- caller's SP
    //   | Saved LR #1                |
    //   | Saved FP #1                |
    //   |----------------------------|
    //   | Extension space for        |
    //   |   inline arg (un)packing   |
    //   |----------------------------|  <-- start of this method's frame
    //   | Saved LR #2                |
    //   | Saved FP #2                |
    //   |----------------------------|  <-- FP
    //   | sp_inc                     |
    //   | method locals              |
    //   |----------------------------|  <-- SP
    //
    // There are two copies of FP and LR on the stack. They will be
    // identical unless the caller has been deoptimized, in which case
    // LR #1 will be patched to point at the deopt blob, and LR #2 will
    // still point into the old method.
    //
    // The sp_inc stack slot holds the total size of the frame including
    // the extension space minus two words for the saved FP and LR.
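    // A worked example with hypothetical sizes: for an initial frame size
    // of 96 bytes extended by 32 bytes, save_stack_increment() (below)
    // stores 96 + 32 - 16 = 112 in the sp_inc slot. The ldr/add pair that
    // follows then releases those 112 bytes in one go, leaving just the
    // saved FP/LR pair for the final ldp.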
    int sp_inc_offset = initial_framesize - 3 * wordSize; // Immediately below saved LR and FP

    ldr(rscratch1, Address(sp, sp_inc_offset));
    add(sp, sp, rscratch1);
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  } else {
    remove_frame(initial_framesize);
  }
}

void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
  int real_frame_size = frame_size + sp_inc;
  assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
  assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
  assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  int sp_inc_offset = frame_size - 3 * wordSize; // Immediately below saved LR and FP

  // Subtract two words for the saved FP and LR as these will be popped
  // separately. See remove_frame above.
  mov(rscratch1, real_frame_size - 2*wordSize);
  str(rscratch1, Address(sp, sp_inc_offset));
}

// This method counts leading positive bytes (highest bit not set) in the
// provided byte array.
address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
  // The simple and most common case (a small aligned array that does not
  // reach the end of a memory page) is handled inline here. All other
  // cases are handled in the stub.
  Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
  const uint64_t UPPER_BIT_MASK=0x8080808080808080;
  assert_different_registers(ary1, len, result);

  mov(result, len);
  cmpw(len, 0);
  br(LE, DONE);
  cmpw(len, 4 * wordSize);
  br(GE, STUB_LONG); // size > 32: go to stub

  int shift = 64 - exact_log2(os::vm_page_size());
  lsl(rscratch1, ary1, shift);
  mov(rscratch2, (size_t)(4 * wordSize) << shift);
  adds(rscratch2, rscratch1, rscratch2); // At end of page?
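  // The lsl/mov/adds sequence above tests for a page crossing without any
  // division: shifting the array address left by (64 - log2(page_size))
  // leaves only the in-page offset bits, so adding (32 << shift) carries
  // out (sets C) exactly when offset + 32 reaches or crosses the page
  // boundary.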
5508 br(CS, STUB); // at the end of page then go to stub 5509 subs(len, len, wordSize); 5510 br(LT, END); 5511 5512 BIND(LOOP); 5513 ldr(rscratch1, Address(post(ary1, wordSize))); 5514 tst(rscratch1, UPPER_BIT_MASK); 5515 br(NE, SET_RESULT); 5516 subs(len, len, wordSize); 5517 br(GE, LOOP); 5518 cmpw(len, -wordSize); 5519 br(EQ, DONE); 5520 5521 BIND(END); 5522 ldr(rscratch1, Address(ary1)); 5523 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5524 lslv(rscratch1, rscratch1, rscratch2); 5525 tst(rscratch1, UPPER_BIT_MASK); 5526 br(NE, SET_RESULT); 5527 b(DONE); 5528 5529 BIND(STUB); 5530 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5531 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5532 address tpc1 = trampoline_call(count_pos); 5533 if (tpc1 == nullptr) { 5534 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5535 postcond(pc() == badAddress); 5536 return nullptr; 5537 } 5538 b(DONE); 5539 5540 BIND(STUB_LONG); 5541 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5542 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5543 address tpc2 = trampoline_call(count_pos_long); 5544 if (tpc2 == nullptr) { 5545 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5546 postcond(pc() == badAddress); 5547 return nullptr; 5548 } 5549 b(DONE); 5550 5551 BIND(SET_RESULT); 5552 5553 add(len, len, wordSize); 5554 sub(result, result, len); 5555 5556 BIND(DONE); 5557 postcond(pc() != badAddress); 5558 return pc(); 5559 } 5560 5561 // Clobbers: rscratch1, rscratch2, rflags 5562 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5563 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5564 Register tmp4, Register tmp5, Register result, 5565 Register cnt1, int elem_size) { 5566 Label DONE, SAME; 5567 Register tmp1 = rscratch1; 5568 Register tmp2 = rscratch2; 5569 Register cnt2 = tmp2; // cnt2 only used in array length compare 5570 int elem_per_word = wordSize/elem_size; 5571 int log_elem_size = exact_log2(elem_size); 5572 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5573 int base_offset 5574 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5575 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5576 5577 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5578 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5579 5580 #ifndef PRODUCT 5581 { 5582 const char kind = (elem_size == 2) ? 'U' : 'L'; 5583 char comment[64]; 5584 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5585 BLOCK_COMMENT(comment); 5586 } 5587 #endif 5588 5589 // if (a1 == a2) 5590 // return true; 5591 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5592 br(EQ, SAME); 5593 5594 if (UseSimpleArrayEquals) { 5595 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5596 // if (a1 == nullptr || a2 == nullptr) 5597 // return false; 5598 // a1 & a2 == 0 means (some-pointer is null) or 5599 // (very-rare-or-even-probably-impossible-pointer-values) 5600 // so, we can save one branch in most cases 5601 tst(a1, a2); 5602 mov(result, false); 5603 br(EQ, A_MIGHT_BE_NULL); 5604 // if (a1.length != a2.length) 5605 // return false; 5606 bind(A_IS_NOT_NULL); 5607 ldrw(cnt1, Address(a1, length_offset)); 5608 ldrw(cnt2, Address(a2, length_offset)); 5609 eorw(tmp5, cnt1, cnt2); 5610 cbnzw(tmp5, DONE); 5611 lea(a1, Address(a1, base_offset)); 5612 lea(a2, Address(a2, base_offset)); 5613 // Check for short strings, i.e. smaller than wordSize. 5614 subs(cnt1, cnt1, elem_per_word); 5615 br(Assembler::LT, SHORT); 5616 // Main 8 byte comparison loop. 5617 bind(NEXT_WORD); { 5618 ldr(tmp1, Address(post(a1, wordSize))); 5619 ldr(tmp2, Address(post(a2, wordSize))); 5620 subs(cnt1, cnt1, elem_per_word); 5621 eor(tmp5, tmp1, tmp2); 5622 cbnz(tmp5, DONE); 5623 } br(GT, NEXT_WORD); 5624 // Last longword. In the case where length == 4 we compare the 5625 // same longword twice, but that's still faster than another 5626 // conditional branch. 5627 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5628 // length == 4. 5629 if (log_elem_size > 0) 5630 lsl(cnt1, cnt1, log_elem_size); 5631 ldr(tmp3, Address(a1, cnt1)); 5632 ldr(tmp4, Address(a2, cnt1)); 5633 eor(tmp5, tmp3, tmp4); 5634 cbnz(tmp5, DONE); 5635 b(SAME); 5636 bind(A_MIGHT_BE_NULL); 5637 // in case both a1 and a2 are not-null, proceed with loads 5638 cbz(a1, DONE); 5639 cbz(a2, DONE); 5640 b(A_IS_NOT_NULL); 5641 bind(SHORT); 5642 5643 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5644 { 5645 ldrw(tmp1, Address(post(a1, 4))); 5646 ldrw(tmp2, Address(post(a2, 4))); 5647 eorw(tmp5, tmp1, tmp2); 5648 cbnzw(tmp5, DONE); 5649 } 5650 bind(TAIL03); 5651 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5652 { 5653 ldrh(tmp3, Address(post(a1, 2))); 5654 ldrh(tmp4, Address(post(a2, 2))); 5655 eorw(tmp5, tmp3, tmp4); 5656 cbnzw(tmp5, DONE); 5657 } 5658 bind(TAIL01); 5659 if (elem_size == 1) { // Only needed when comparing byte arrays. 5660 tbz(cnt1, 0, SAME); // 0-1 bytes left. 
5661 { 5662 ldrb(tmp1, a1); 5663 ldrb(tmp2, a2); 5664 eorw(tmp5, tmp1, tmp2); 5665 cbnzw(tmp5, DONE); 5666 } 5667 } 5668 } else { 5669 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5670 CSET_EQ, LAST_CHECK; 5671 mov(result, false); 5672 cbz(a1, DONE); 5673 ldrw(cnt1, Address(a1, length_offset)); 5674 cbz(a2, DONE); 5675 ldrw(cnt2, Address(a2, length_offset)); 5676 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's 5677 // faster to perform another branch before comparing a1 and a2 5678 cmp(cnt1, (u1)elem_per_word); 5679 br(LE, SHORT); // short or same 5680 ldr(tmp3, Address(pre(a1, base_offset))); 5681 subs(zr, cnt1, stubBytesThreshold); 5682 br(GE, STUB); 5683 ldr(tmp4, Address(pre(a2, base_offset))); 5684 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5685 cmp(cnt2, cnt1); 5686 br(NE, DONE); 5687 5688 // Main 16 byte comparison loop with 2 exits 5689 bind(NEXT_DWORD); { 5690 ldr(tmp1, Address(pre(a1, wordSize))); 5691 ldr(tmp2, Address(pre(a2, wordSize))); 5692 subs(cnt1, cnt1, 2 * elem_per_word); 5693 br(LE, TAIL); 5694 eor(tmp4, tmp3, tmp4); 5695 cbnz(tmp4, DONE); 5696 ldr(tmp3, Address(pre(a1, wordSize))); 5697 ldr(tmp4, Address(pre(a2, wordSize))); 5698 cmp(cnt1, (u1)elem_per_word); 5699 br(LE, TAIL2); 5700 cmp(tmp1, tmp2); 5701 } br(EQ, NEXT_DWORD); 5702 b(DONE); 5703 5704 bind(TAIL); 5705 eor(tmp4, tmp3, tmp4); 5706 eor(tmp2, tmp1, tmp2); 5707 lslv(tmp2, tmp2, tmp5); 5708 orr(tmp5, tmp4, tmp2); 5709 cmp(tmp5, zr); 5710 b(CSET_EQ); 5711 5712 bind(TAIL2); 5713 eor(tmp2, tmp1, tmp2); 5714 cbnz(tmp2, DONE); 5715 b(LAST_CHECK); 5716 5717 bind(STUB); 5718 ldr(tmp4, Address(pre(a2, base_offset))); 5719 cmp(cnt2, cnt1); 5720 br(NE, DONE); 5721 if (elem_size == 2) { // convert to byte counter 5722 lsl(cnt1, cnt1, 1); 5723 } 5724 eor(tmp5, tmp3, tmp4); 5725 cbnz(tmp5, DONE); 5726 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 5727 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 5728 address tpc = trampoline_call(stub); 5729 if (tpc == nullptr) { 5730 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 5731 postcond(pc() == badAddress); 5732 return nullptr; 5733 } 5734 b(DONE); 5735 5736 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 5737 // so, if a2 == null => return false(0), else return true, so we can return a2 5738 mov(result, a2); 5739 b(DONE); 5740 bind(SHORT); 5741 cmp(cnt2, cnt1); 5742 br(NE, DONE); 5743 cbz(cnt1, SAME); 5744 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 5745 ldr(tmp3, Address(a1, base_offset)); 5746 ldr(tmp4, Address(a2, base_offset)); 5747 bind(LAST_CHECK); 5748 eor(tmp4, tmp3, tmp4); 5749 lslv(tmp5, tmp4, tmp5); 5750 cmp(tmp5, zr); 5751 bind(CSET_EQ); 5752 cset(result, EQ); 5753 b(DONE); 5754 } 5755 5756 bind(SAME); 5757 mov(result, true); 5758 // That's it. 5759 bind(DONE); 5760 5761 BLOCK_COMMENT("} array_equals"); 5762 postcond(pc() != badAddress); 5763 return pc(); 5764 } 5765 5766 // Compare Strings 5767 5768 // For Strings we're passed the address of the first characters in a1 5769 // and a2 and the length in cnt1. 5770 // There are two implementations. For arrays >= 8 bytes, all 5771 // comparisons (including the final one, which may overlap) are 5772 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 5773 // halfword, then a short, and then a byte. 
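// Note that 'word' above is the 8-byte VM word, not the 4-byte AArch64
// instruction-set word: the tail below compares with ldrw (32 bits), then
// ldrh (16 bits), then ldrb (8 bits). The 8-byte strategy is roughly this
// (a sketch in C, not HotSpot code; a1/a2 are byte pointers, cnt is a byte
// count, and a1_end/a2_end are illustrative names for the string ends):
//
//   for (; cnt > 8; cnt -= 8, a1 += 8, a2 += 8)
//     if (*(uint64_t*)a1 != *(uint64_t*)a2) return false;
//   // One final 8-byte compare at the very end of the strings; it may
//   // overlap data already compared, which is harmless.
//   return *(uint64_t*)(a1_end - 8) == *(uint64_t*)(a2_end - 8);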
5774 5775 void MacroAssembler::string_equals(Register a1, Register a2, 5776 Register result, Register cnt1) 5777 { 5778 Label SAME, DONE, SHORT, NEXT_WORD; 5779 Register tmp1 = rscratch1; 5780 Register tmp2 = rscratch2; 5781 Register cnt2 = tmp2; // cnt2 only used in array length compare 5782 5783 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5784 5785 #ifndef PRODUCT 5786 { 5787 char comment[64]; 5788 snprintf(comment, sizeof comment, "{string_equalsL"); 5789 BLOCK_COMMENT(comment); 5790 } 5791 #endif 5792 5793 mov(result, false); 5794 5795 // Check for short strings, i.e. smaller than wordSize. 5796 subs(cnt1, cnt1, wordSize); 5797 br(Assembler::LT, SHORT); 5798 // Main 8 byte comparison loop. 5799 bind(NEXT_WORD); { 5800 ldr(tmp1, Address(post(a1, wordSize))); 5801 ldr(tmp2, Address(post(a2, wordSize))); 5802 subs(cnt1, cnt1, wordSize); 5803 eor(tmp1, tmp1, tmp2); 5804 cbnz(tmp1, DONE); 5805 } br(GT, NEXT_WORD); 5806 // Last longword. In the case where length == 4 we compare the 5807 // same longword twice, but that's still faster than another 5808 // conditional branch. 5809 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5810 // length == 4. 5811 ldr(tmp1, Address(a1, cnt1)); 5812 ldr(tmp2, Address(a2, cnt1)); 5813 eor(tmp2, tmp1, tmp2); 5814 cbnz(tmp2, DONE); 5815 b(SAME); 5816 5817 bind(SHORT); 5818 Label TAIL03, TAIL01; 5819 5820 tbz(cnt1, 2, TAIL03); // 0-7 bytes left. 5821 { 5822 ldrw(tmp1, Address(post(a1, 4))); 5823 ldrw(tmp2, Address(post(a2, 4))); 5824 eorw(tmp1, tmp1, tmp2); 5825 cbnzw(tmp1, DONE); 5826 } 5827 bind(TAIL03); 5828 tbz(cnt1, 1, TAIL01); // 0-3 bytes left. 5829 { 5830 ldrh(tmp1, Address(post(a1, 2))); 5831 ldrh(tmp2, Address(post(a2, 2))); 5832 eorw(tmp1, tmp1, tmp2); 5833 cbnzw(tmp1, DONE); 5834 } 5835 bind(TAIL01); 5836 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5837 { 5838 ldrb(tmp1, a1); 5839 ldrb(tmp2, a2); 5840 eorw(tmp1, tmp1, tmp2); 5841 cbnzw(tmp1, DONE); 5842 } 5843 // Arrays are equal. 5844 bind(SAME); 5845 mov(result, true); 5846 5847 // That's it. 5848 bind(DONE); 5849 BLOCK_COMMENT("} string_equals"); 5850 } 5851 5852 5853 // The size of the blocks erased by the zero_blocks stub. We must 5854 // handle anything smaller than this ourselves in zero_words(). 5855 const int MacroAssembler::zero_words_block_size = 8; 5856 5857 // zero_words() is used by C2 ClearArray patterns and by 5858 // C1_MacroAssembler. It is as small as possible, handling small word 5859 // counts locally and delegating anything larger to the zero_blocks 5860 // stub. It is expanded many times in compiled code, so it is 5861 // important to keep it short. 5862 5863 // ptr: Address of a buffer to be zeroed. 5864 // cnt: Count in HeapWords. 5865 // 5866 // ptr, cnt, rscratch1, and rscratch2 are clobbered. 5867 address MacroAssembler::zero_words(Register ptr, Register cnt) 5868 { 5869 assert(is_power_of_2(zero_words_block_size), "adjust this"); 5870 5871 BLOCK_COMMENT("zero_words {"); 5872 assert(ptr == r10 && cnt == r11, "mismatch in register usage"); 5873 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5874 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5875 5876 subs(rscratch1, cnt, zero_words_block_size); 5877 Label around; 5878 br(LO, around); 5879 { 5880 RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); 5881 assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); 5882 // Make sure this is a C2 compilation. 
C1 allocates space only for 5883 // trampoline stubs generated by Call LIR ops, and in any case it 5884 // makes sense for a C1 compilation task to proceed as quickly as 5885 // possible. 5886 CompileTask* task; 5887 if (StubRoutines::aarch64::complete() 5888 && Thread::current()->is_Compiler_thread() 5889 && (task = ciEnv::current()->task()) 5890 && is_c2_compile(task->comp_level())) { 5891 address tpc = trampoline_call(zero_blocks); 5892 if (tpc == nullptr) { 5893 DEBUG_ONLY(reset_labels(around)); 5894 return nullptr; 5895 } 5896 } else { 5897 far_call(zero_blocks); 5898 } 5899 } 5900 bind(around); 5901 5902 // We have a few words left to do. zero_blocks has adjusted r10 and r11 5903 // for us. 5904 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 5905 Label l; 5906 tbz(cnt, exact_log2(i), l); 5907 for (int j = 0; j < i; j += 2) { 5908 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 5909 } 5910 bind(l); 5911 } 5912 { 5913 Label l; 5914 tbz(cnt, 0, l); 5915 str(zr, Address(ptr)); 5916 bind(l); 5917 } 5918 5919 BLOCK_COMMENT("} zero_words"); 5920 return pc(); 5921 } 5922 5923 // base: Address of a buffer to be zeroed, 8 bytes aligned. 5924 // cnt: Immediate count in HeapWords. 5925 // 5926 // r10, r11, rscratch1, and rscratch2 are clobbered. 5927 address MacroAssembler::zero_words(Register base, uint64_t cnt) 5928 { 5929 assert(wordSize <= BlockZeroingLowLimit, 5930 "increase BlockZeroingLowLimit"); 5931 address result = nullptr; 5932 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 5933 #ifndef PRODUCT 5934 { 5935 char buf[64]; 5936 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 5937 BLOCK_COMMENT(buf); 5938 } 5939 #endif 5940 if (cnt >= 16) { 5941 uint64_t loops = cnt/16; 5942 if (loops > 1) { 5943 mov(rscratch2, loops - 1); 5944 } 5945 { 5946 Label loop; 5947 bind(loop); 5948 for (int i = 0; i < 16; i += 2) { 5949 stp(zr, zr, Address(base, i * BytesPerWord)); 5950 } 5951 add(base, base, 16 * BytesPerWord); 5952 if (loops > 1) { 5953 subs(rscratch2, rscratch2, 1); 5954 br(GE, loop); 5955 } 5956 } 5957 } 5958 cnt %= 16; 5959 int i = cnt & 1; // store any odd word to start 5960 if (i) str(zr, Address(base)); 5961 for (; i < (int)cnt; i += 2) { 5962 stp(zr, zr, Address(base, i * wordSize)); 5963 } 5964 BLOCK_COMMENT("} zero_words"); 5965 result = pc(); 5966 } else { 5967 mov(r10, base); mov(r11, cnt); 5968 result = zero_words(r10, r11); 5969 } 5970 return result; 5971 } 5972 5973 // Zero blocks of memory by using DC ZVA. 5974 // 5975 // Aligns the base address first sufficiently for DC ZVA, then uses 5976 // DC ZVA repeatedly for every full block. cnt is the size to be 5977 // zeroed in HeapWords. Returns the count of words left to be zeroed 5978 // in cnt. 5979 // 5980 // NOTE: This is intended to be used in the zero_blocks() stub. If 5981 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 5982 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 5983 Register tmp = rscratch1; 5984 Register tmp2 = rscratch2; 5985 int zva_length = VM_Version::zva_length(); 5986 Label initial_table_end, loop_zva; 5987 Label fini; 5988 5989 // Base must be 16 byte aligned. If not just return and let caller handle it 5990 tst(base, 0x0f); 5991 br(Assembler::NE, fini); 5992 // Align base with ZVA length. 5993 neg(tmp, base); 5994 andr(tmp, tmp, zva_length - 1); 5995 5996 // tmp: the number of bytes to be filled to align the base with ZVA length. 
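  // The adr/sub/br below implement a computed branch into the stp ladder
  // that follows: each stp is 4 bytes of code and zeroes 16 bytes of data,
  // so backing up from initial_table_end by tmp >> 2 code bytes executes
  // exactly the tmp / 16 stores needed to reach ZVA alignment.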
  add(base, base, tmp);
  sub(cnt, cnt, tmp, Assembler::ASR, 3);
  adr(tmp2, initial_table_end);
  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
  br(tmp2);

  for (int i = -zva_length + 16; i < 0; i += 16)
    stp(zr, zr, Address(base, i));
  bind(initial_table_end);

  sub(cnt, cnt, zva_length >> 3);
  bind(loop_zva);
  dc(Assembler::ZVA, base);
  subs(cnt, cnt, zva_length >> 3);
  add(base, base, zva_length);
  br(Assembler::GE, loop_zva);
  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
  bind(fini);
}

// base:   Address of a buffer to be filled, 8 bytes aligned.
// cnt:    Count in 8-byte units.
// value:  Value to be filled with.
// base will point to the end of the buffer after filling.
void MacroAssembler::fill_words(Register base, Register cnt, Register value)
{
//  Algorithm:
//
//    if (cnt == 0) {
//      return;
//    }
//    if ((p & 8) != 0) {
//      *p++ = v;
//    }
//
//    scratch1 = cnt & 14;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1 / 2) {
//      do {
//        cnt -= 16;
//          p[-16] = v;
//          p[-15] = v;
//        case 7:
//          p[-14] = v;
//          p[-13] = v;
//        case 6:
//          p[-12] = v;
//          p[-11] = v;
//          // ...
//        case 1:
//          p[-2] = v;
//          p[-1] = v;
//        case 0:
//          p += 16;
//      } while (cnt);
//    }
//    if ((cnt & 1) == 1) {
//      *p++ = v;
//    }

  assert_different_registers(base, cnt, value, rscratch1, rscratch2);

  Label fini, skip, entry, loop;
  const int unroll = 8; // Number of stp instructions we'll unroll

  cbz(cnt, fini);
  tbz(base, 3, skip);
  str(value, Address(post(base, 8)));
  sub(cnt, cnt, 1);
  bind(skip);

  andr(rscratch1, cnt, (unroll-1) * 2);
  sub(cnt, cnt, rscratch1);
  add(base, base, rscratch1, Assembler::LSL, 3);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
  br(rscratch2);

  bind(loop);
  add(base, base, unroll * 16);
  for (int i = -unroll; i < 0; i++)
    stp(value, value, Address(base, i * 16));
  bind(entry);
  subs(cnt, cnt, unroll * 2);
  br(Assembler::GE, loop);

  tbz(cnt, 0, fini);
  str(value, Address(post(base, 8)));
  bind(fini);
}

// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
//     return the number of characters copied.
// - java/lang/StringUTF16.compress
//     return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the
// post-condition: 'res' == 'len', while an unsuccessful copy will exit with
// the post-condition: 0 <= 'res' < 'len'.
//
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
//       degrade performance (on Ampere Altra - Neoverse N1), to an extent
//       beyond the acceptable, even though the footprint would be smaller.
//       Using 'umaxv' in the ASCII-case comes with a small penalty but does
//       avoid additional bloat.
6106 // 6107 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6108 void MacroAssembler::encode_iso_array(Register src, Register dst, 6109 Register len, Register res, bool ascii, 6110 FloatRegister vtmp0, FloatRegister vtmp1, 6111 FloatRegister vtmp2, FloatRegister vtmp3, 6112 FloatRegister vtmp4, FloatRegister vtmp5) 6113 { 6114 Register cnt = res; 6115 Register max = rscratch1; 6116 Register chk = rscratch2; 6117 6118 prfm(Address(src), PLDL1STRM); 6119 movw(cnt, len); 6120 6121 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6122 6123 Label LOOP_32, DONE_32, FAIL_32; 6124 6125 BIND(LOOP_32); 6126 { 6127 cmpw(cnt, 32); 6128 br(LT, DONE_32); 6129 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6130 // Extract lower bytes. 6131 FloatRegister vlo0 = vtmp4; 6132 FloatRegister vlo1 = vtmp5; 6133 uzp1(vlo0, T16B, vtmp0, vtmp1); 6134 uzp1(vlo1, T16B, vtmp2, vtmp3); 6135 // Merge bits... 6136 orr(vtmp0, T16B, vtmp0, vtmp1); 6137 orr(vtmp2, T16B, vtmp2, vtmp3); 6138 // Extract merged upper bytes. 6139 FloatRegister vhix = vtmp0; 6140 uzp2(vhix, T16B, vtmp0, vtmp2); 6141 // ISO-check on hi-parts (all zero). 6142 // ASCII-check on lo-parts (no sign). 6143 FloatRegister vlox = vtmp1; // Merge lower bytes. 6144 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6145 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6146 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6147 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6148 ASCII(orr(chk, chk, max)); 6149 cbnz(chk, FAIL_32); 6150 subw(cnt, cnt, 32); 6151 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6152 b(LOOP_32); 6153 } 6154 BIND(FAIL_32); 6155 sub(src, src, 64); 6156 BIND(DONE_32); 6157 6158 Label LOOP_8, SKIP_8; 6159 6160 BIND(LOOP_8); 6161 { 6162 cmpw(cnt, 8); 6163 br(LT, SKIP_8); 6164 FloatRegister vhi = vtmp0; 6165 FloatRegister vlo = vtmp1; 6166 ld1(vtmp3, T8H, src); 6167 uzp1(vlo, T16B, vtmp3, vtmp3); 6168 uzp2(vhi, T16B, vtmp3, vtmp3); 6169 // ISO-check on hi-parts (all zero). 6170 // ASCII-check on lo-parts (no sign). 6171 ASCII(cm(LT, vtmp2, T16B, vlo)); 6172 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6173 ASCII(umov(max, vtmp2, B, 0)); 6174 ASCII(orr(chk, chk, max)); 6175 cbnz(chk, SKIP_8); 6176 6177 strd(vlo, Address(post(dst, 8))); 6178 subw(cnt, cnt, 8); 6179 add(src, src, 16); 6180 b(LOOP_8); 6181 } 6182 BIND(SKIP_8); 6183 6184 #undef ASCII 6185 6186 Label LOOP, DONE; 6187 6188 cbz(cnt, DONE); 6189 BIND(LOOP); 6190 { 6191 Register chr = rscratch1; 6192 ldrh(chr, Address(post(src, 2))); 6193 tst(chr, ascii ? 0xff80 : 0xff00); 6194 br(NE, DONE); 6195 strb(chr, Address(post(dst, 1))); 6196 subs(cnt, cnt, 1); 6197 br(GT, LOOP); 6198 } 6199 BIND(DONE); 6200 // Return index where we stopped. 6201 subw(res, len, cnt); 6202 } 6203 6204 // Inflate byte[] array to char[]. 6205 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6206 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6207 FloatRegister vtmp1, FloatRegister vtmp2, 6208 FloatRegister vtmp3, Register tmp4) { 6209 Label big, done, after_init, to_stub; 6210 6211 assert_different_registers(src, dst, len, tmp4, rscratch1); 6212 6213 fmovd(vtmp1, 0.0); 6214 lsrw(tmp4, len, 3); 6215 bind(after_init); 6216 cbnzw(tmp4, big); 6217 // Short string: less than 8 bytes. 6218 { 6219 Label loop, tiny; 6220 6221 cmpw(len, 4); 6222 br(LT, tiny); 6223 // Use SIMD to do 4 bytes. 
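    // The zip1 below interleaves the loaded latin-1 bytes with the zeroes
    // kept in vtmp1, e.g. (little-endian lanes, a sketch):
    //
    //   vtmp2:  b0 b1 b2 b3                 vtmp1:  00 00 00 00
    //   zip1 -> b0 00 b1 00 b2 00 b3 00  == four 16-bit chars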
6224 ldrs(vtmp2, post(src, 4)); 6225 zip1(vtmp3, T8B, vtmp2, vtmp1); 6226 subw(len, len, 4); 6227 strd(vtmp3, post(dst, 8)); 6228 6229 cbzw(len, done); 6230 6231 // Do the remaining bytes by steam. 6232 bind(loop); 6233 ldrb(tmp4, post(src, 1)); 6234 strh(tmp4, post(dst, 2)); 6235 subw(len, len, 1); 6236 6237 bind(tiny); 6238 cbnz(len, loop); 6239 6240 b(done); 6241 } 6242 6243 if (SoftwarePrefetchHintDistance >= 0) { 6244 bind(to_stub); 6245 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6246 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6247 address tpc = trampoline_call(stub); 6248 if (tpc == nullptr) { 6249 DEBUG_ONLY(reset_labels(big, done)); 6250 postcond(pc() == badAddress); 6251 return nullptr; 6252 } 6253 b(after_init); 6254 } 6255 6256 // Unpack the bytes 8 at a time. 6257 bind(big); 6258 { 6259 Label loop, around, loop_last, loop_start; 6260 6261 if (SoftwarePrefetchHintDistance >= 0) { 6262 const int large_loop_threshold = (64 + 16)/8; 6263 ldrd(vtmp2, post(src, 8)); 6264 andw(len, len, 7); 6265 cmp(tmp4, (u1)large_loop_threshold); 6266 br(GE, to_stub); 6267 b(loop_start); 6268 6269 bind(loop); 6270 ldrd(vtmp2, post(src, 8)); 6271 bind(loop_start); 6272 subs(tmp4, tmp4, 1); 6273 br(EQ, loop_last); 6274 zip1(vtmp2, T16B, vtmp2, vtmp1); 6275 ldrd(vtmp3, post(src, 8)); 6276 st1(vtmp2, T8H, post(dst, 16)); 6277 subs(tmp4, tmp4, 1); 6278 zip1(vtmp3, T16B, vtmp3, vtmp1); 6279 st1(vtmp3, T8H, post(dst, 16)); 6280 br(NE, loop); 6281 b(around); 6282 bind(loop_last); 6283 zip1(vtmp2, T16B, vtmp2, vtmp1); 6284 st1(vtmp2, T8H, post(dst, 16)); 6285 bind(around); 6286 cbz(len, done); 6287 } else { 6288 andw(len, len, 7); 6289 bind(loop); 6290 ldrd(vtmp2, post(src, 8)); 6291 sub(tmp4, tmp4, 1); 6292 zip1(vtmp3, T16B, vtmp2, vtmp1); 6293 st1(vtmp3, T8H, post(dst, 16)); 6294 cbnz(tmp4, loop); 6295 } 6296 } 6297 6298 // Do the tail of up to 8 bytes. 6299 add(src, src, len); 6300 ldrd(vtmp3, Address(src, -8)); 6301 add(dst, dst, len, ext::uxtw, 1); 6302 zip1(vtmp3, T16B, vtmp3, vtmp1); 6303 strq(vtmp3, Address(dst, -16)); 6304 6305 bind(done); 6306 postcond(pc() != badAddress); 6307 return pc(); 6308 } 6309 6310 // Compress char[] array to byte[]. 6311 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6312 // Return the array length if every element in array can be encoded, 6313 // otherwise, the index of first non-latin1 (> 0xff) character. 6314 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6315 Register res, 6316 FloatRegister tmp0, FloatRegister tmp1, 6317 FloatRegister tmp2, FloatRegister tmp3, 6318 FloatRegister tmp4, FloatRegister tmp5) { 6319 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6320 } 6321 6322 // java.math.round(double a) 6323 // Returns the closest long to the argument, with ties rounding to 6324 // positive infinity. This requires some fiddling for corner 6325 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6326 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6327 FloatRegister ftmp) { 6328 Label DONE; 6329 BLOCK_COMMENT("java_round_double: { "); 6330 fmovd(rscratch1, src); 6331 // Use RoundToNearestTiesAway unless src small and -ve. 
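  // A worked example of why the fix-up below is needed: Java rounds ties
  // towards positive infinity, so Math.round(-2.5) == -2, whereas fcvtas
  // (round to nearest, ties away from zero) would yield -3. For small
  // negative inputs we therefore recompute as floor(src + 0.5), e.g.
  // floor(-2.5 + 0.5) == -2. Non-negative inputs agree with ties-away, and
  // values with magnitude >= 2^52 are already integral, so the fcvtas
  // result is safe for both.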
  fcvtasd(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p52
  eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
  mov(rscratch2, julong_cast(0x1.0p52));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && abs(src) < 0x1.0p52
    // src may have a fractional part, so add 0.5
    fmovd(ftmp, 0.5);
    faddd(ftmp, src, ftmp);
    // Convert double to jlong, use RoundTowardsNegative
    fcvtmsd(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_double");
}

void MacroAssembler::java_round_float(Register dst, FloatRegister src,
                                      FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_float: { ");
  fmovs(rscratch1, src);
  // Use RoundToNearestTiesAway unless src small and -ve.
  fcvtassw(dst, src);
  // Test if src >= 0 || abs(src) >= 0x1.0p23
  eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
  mov(rscratch2, jint_cast(0x1.0p23f));
  cmp(rscratch1, rscratch2);
  br(HS, DONE); {
    // src < 0 && |src| < 0x1.0p23
    // src may have a fractional part, so add 0.5
    fmovs(ftmp, 0.5f);
    fadds(ftmp, src, ftmp);
    // Convert float to jint, use RoundTowardsNegative
    fcvtmssw(dst, ftmp);
  }
  bind(DONE);
  BLOCK_COMMENT("} java_round_float");
}

// get_thread() can be called anywhere inside generated code so we
// need to save whatever non-callee save context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is a usual C function.
//
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs =
    LINUX_ONLY(RegSet::range(r0, r1)  + lr - dst)
    NOT_LINUX (RegSet::range(r0, r17) + lr - dst);

  protect_return_address();
  push(saved_regs, sp);

  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
  blr(lr);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
  authenticate_return_address();
}

#ifdef COMPILER2
// C2 compiled method's prolog code
// Moved here from aarch64.ad to support the Valhalla code below
void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    generate_stack_overflow_check(bangsize);

  // n.b.
frame size includes space for return pc and rfp 6422 const long framesize = C->output()->frame_size_in_bytes(); 6423 build_frame(framesize); 6424 6425 if (C->needs_stack_repair()) { 6426 save_stack_increment(sp_inc, framesize); 6427 } 6428 6429 if (VerifyStackAtCalls) { 6430 Unimplemented(); 6431 } 6432 } 6433 #endif // COMPILER2 6434 6435 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) { 6436 assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields"); 6437 // An inline type might be returned. If fields are in registers we 6438 // need to allocate an inline type instance and initialize it with 6439 // the value of the fields. 6440 Label skip; 6441 // We only need a new buffered inline type if a new one is not returned 6442 tbz(r0, 0, skip); 6443 int call_offset = -1; 6444 6445 // Be careful not to clobber r1-7 which hold returned fields 6446 // Also do not use callee-saved registers as these may be live in the interpreter 6447 Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12; 6448 6449 // The following code is similar to allocate_instance but has some slight differences, 6450 // e.g. object size is always not zero, sometimes it's constant; storing klass ptr after 6451 // allocating is not necessary if vk != nullptr, etc. allocate_instance is not aware of these. 6452 Label slow_case; 6453 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space 6454 mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it when allocation failed 6455 6456 if (vk != nullptr) { 6457 // Called from C1, where the return type is statically known. 6458 movptr(klass, (intptr_t)vk->get_InlineKlass()); 6459 jint obj_size = vk->layout_helper(); 6460 assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved"); 6461 if (UseTLAB) { 6462 tlab_allocate(r0, noreg, obj_size, tmp1, tmp2, slow_case); 6463 } else { 6464 b(slow_case); 6465 } 6466 } else { 6467 // Call from interpreter. R0 contains ((the InlineKlass* of the return type) | 0x01) 6468 andr(klass, r0, -2); 6469 ldrw(tmp2, Address(klass, Klass::layout_helper_offset())); 6470 if (UseTLAB) { 6471 tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case); 6472 } else { 6473 b(slow_case); 6474 } 6475 } 6476 if (UseTLAB) { 6477 // 2. Initialize buffered inline instance header 6478 Register buffer_obj = r0; 6479 mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value()); 6480 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes())); 6481 store_klass_gap(buffer_obj, zr); 6482 if (vk == nullptr) { 6483 // store_klass corrupts klass, so save it for later use (interpreter case only). 6484 mov(tmp1, klass); 6485 } 6486 store_klass(buffer_obj, klass); 6487 // 3. Initialize its fields with an inline class specific handler 6488 if (vk != nullptr) { 6489 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint. 6490 } else { 6491 // tmp1 holds klass preserved above 6492 ldr(tmp1, Address(tmp1, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6493 ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset())); 6494 blr(tmp1); 6495 } 6496 6497 membar(Assembler::StoreStore); 6498 b(skip); 6499 } else { 6500 // Must have already branched to slow_case above. 6501 DEBUG_ONLY(should_not_reach_here()); 6502 } 6503 bind(slow_case); 6504 // We failed to allocate a new inline type, fall back to a runtime 6505 // call. 
Some oop field may be live in some registers but we can't 6506 // tell. That runtime call will take care of preserving them 6507 // across a GC if there's one. 6508 mov(r0, r0_preserved); 6509 6510 if (from_interpreter) { 6511 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf()); 6512 } else { 6513 far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf())); 6514 call_offset = offset(); 6515 } 6516 membar(Assembler::StoreStore); 6517 6518 bind(skip); 6519 return call_offset; 6520 } 6521 6522 // Move a value between registers/stack slots and update the reg_state 6523 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) { 6524 assert(from->is_valid() && to->is_valid(), "source and destination must be valid"); 6525 if (reg_state[to->value()] == reg_written) { 6526 return true; // Already written 6527 } 6528 6529 if (from != to && bt != T_VOID) { 6530 if (reg_state[to->value()] == reg_readonly) { 6531 return false; // Not yet writable 6532 } 6533 if (from->is_reg()) { 6534 if (to->is_reg()) { 6535 if (from->is_Register() && to->is_Register()) { 6536 mov(to->as_Register(), from->as_Register()); 6537 } else if (from->is_FloatRegister() && to->is_FloatRegister()) { 6538 fmovd(to->as_FloatRegister(), from->as_FloatRegister()); 6539 } else { 6540 ShouldNotReachHere(); 6541 } 6542 } else { 6543 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size; 6544 Address to_addr = Address(sp, st_off); 6545 if (from->is_FloatRegister()) { 6546 if (bt == T_DOUBLE) { 6547 strd(from->as_FloatRegister(), to_addr); 6548 } else { 6549 assert(bt == T_FLOAT, "must be float"); 6550 strs(from->as_FloatRegister(), to_addr); 6551 } 6552 } else { 6553 str(from->as_Register(), to_addr); 6554 } 6555 } 6556 } else { 6557 Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size); 6558 if (to->is_reg()) { 6559 if (to->is_FloatRegister()) { 6560 if (bt == T_DOUBLE) { 6561 ldrd(to->as_FloatRegister(), from_addr); 6562 } else { 6563 assert(bt == T_FLOAT, "must be float"); 6564 ldrs(to->as_FloatRegister(), from_addr); 6565 } 6566 } else { 6567 ldr(to->as_Register(), from_addr); 6568 } 6569 } else { 6570 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size; 6571 ldr(rscratch1, from_addr); 6572 str(rscratch1, Address(sp, st_off)); 6573 } 6574 } 6575 } 6576 6577 // Update register states 6578 reg_state[from->value()] = reg_writable; 6579 reg_state[to->value()] = reg_written; 6580 return true; 6581 } 6582 6583 // Calculate the extra stack space required for packing or unpacking inline 6584 // args and adjust the stack pointer 6585 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) { 6586 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size; 6587 sp_inc = align_up(sp_inc, StackAlignmentInBytes); 6588 assert(sp_inc > 0, "sanity"); 6589 6590 // Save a copy of the FP and LR here for deoptimization patching and frame walking 6591 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6592 6593 // Adjust the stack pointer. 
This will be repaired on return by MacroAssembler::remove_frame 6594 if (sp_inc < (1 << 9)) { 6595 sub(sp, sp, sp_inc); // Fits in an immediate 6596 } else { 6597 mov(rscratch1, sp_inc); 6598 sub(sp, sp, rscratch1); 6599 } 6600 6601 return sp_inc + 2 * wordSize; // Account for the FP/LR space 6602 } 6603 6604 // Read all fields from an inline type oop and store the values in registers/stack slots 6605 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, 6606 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index, 6607 RegState reg_state[]) { 6608 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter"); 6609 assert(from->is_valid(), "source must be valid"); 6610 bool progress = false; 6611 #ifdef ASSERT 6612 const int start_offset = offset(); 6613 #endif 6614 6615 Label L_null, L_notNull; 6616 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for) 6617 Register tmp1 = r10; 6618 Register tmp2 = r11; 6619 Register fromReg = noreg; 6620 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1); 6621 bool done = true; 6622 bool mark_done = true; 6623 VMReg toReg; 6624 BasicType bt; 6625 // Check if argument requires a null check 6626 bool null_check = false; 6627 VMReg nullCheckReg; 6628 while (stream.next(nullCheckReg, bt)) { 6629 if (sig->at(stream.sig_index())._offset == -1) { 6630 null_check = true; 6631 break; 6632 } 6633 } 6634 stream.reset(sig_index, to_index); 6635 while (stream.next(toReg, bt)) { 6636 assert(toReg->is_valid(), "destination must be valid"); 6637 int idx = (int)toReg->value(); 6638 if (reg_state[idx] == reg_readonly) { 6639 if (idx != from->value()) { 6640 mark_done = false; 6641 } 6642 done = false; 6643 continue; 6644 } else if (reg_state[idx] == reg_written) { 6645 continue; 6646 } 6647 assert(reg_state[idx] == reg_writable, "must be writable"); 6648 reg_state[idx] = reg_written; 6649 progress = true; 6650 6651 if (fromReg == noreg) { 6652 if (from->is_reg()) { 6653 fromReg = from->as_Register(); 6654 } else { 6655 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size; 6656 ldr(tmp1, Address(sp, st_off)); 6657 fromReg = tmp1; 6658 } 6659 if (null_check) { 6660 // Nullable inline type argument, emit null check 6661 cbz(fromReg, L_null); 6662 } 6663 } 6664 int off = sig->at(stream.sig_index())._offset; 6665 if (off == -1) { 6666 assert(null_check, "Missing null check at"); 6667 if (toReg->is_stack()) { 6668 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size; 6669 mov(tmp2, 1); 6670 str(tmp2, Address(sp, st_off)); 6671 } else { 6672 mov(toReg->as_Register(), 1); 6673 } 6674 continue; 6675 } 6676 assert(off > 0, "offset in object should be positive"); 6677 Address fromAddr = Address(fromReg, off); 6678 if (!toReg->is_FloatRegister()) { 6679 Register dst = toReg->is_stack() ? 
      Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
      if (is_reference_type(bt)) {
        load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
      } else {
        bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
        load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
      }
      if (toReg->is_stack()) {
        int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
        str(dst, Address(sp, st_off));
      }
    } else if (bt == T_DOUBLE) {
      ldrd(toReg->as_FloatRegister(), fromAddr);
    } else {
      assert(bt == T_FLOAT, "must be float");
      ldrs(toReg->as_FloatRegister(), fromAddr);
    }
  }
  if (progress && null_check) {
    if (done) {
      b(L_notNull);
      bind(L_null);
      // Set IsInit field to zero to signal that the argument is null.
      // Also set all oop fields to zero to make the GC happy.
      stream.reset(sig_index, to_index);
      while (stream.next(toReg, bt)) {
        if (sig->at(stream.sig_index())._offset == -1 ||
            bt == T_OBJECT || bt == T_ARRAY) {
          if (toReg->is_stack()) {
            int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
            str(zr, Address(sp, st_off));
          } else {
            mov(toReg->as_Register(), zr);
          }
        }
      }
      bind(L_notNull);
    } else {
      bind(L_null);
    }
  }

  sig_index = stream.sig_index();
  to_index = stream.regs_index();

  if (mark_done && reg_state[from->value()] != reg_written) {
    // This is okay because no one else will write to that slot
    reg_state[from->value()] = reg_writable;
  }
  from_index--;
  assert(progress || (start_offset == offset()), "should not emit code");
  return done;
}

// Pack fields back into an inline type oop
bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                                        VMRegPair* from, int from_count, int& from_index, VMReg to,
                                        RegState reg_state[], Register val_array) {
  assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
  assert(to->is_valid(), "destination must be valid");

  if (reg_state[to->value()] == reg_written) {
    skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
    return true; // Already written
  }

  // The GC barrier expanded by store_heap_oop below may call into the
  // runtime so use callee-saved registers for any values that need to be
  // preserved. The GC barrier assembler should take care of saving the
  // Java argument registers.
  // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value?
  // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
  Register val_obj_tmp = r21;
  Register from_reg_tmp = r22;
  Register tmp1 = r14;
  Register tmp2 = r13;
  Register tmp3 = r12;
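  // val_obj must be a register even when the final destination is a stack
  // slot: store_heap_oop below needs it as the base of the field addresses,
  // and the result is only moved to its VMReg at the end via move_helper.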
  Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();

  assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);

  if (reg_state[to->value()] == reg_readonly) {
    if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
      skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
      return false; // Not yet writable
    }
    val_obj = val_obj_tmp;
  }

  int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
  load_heap_oop(val_obj, Address(val_array, index), tmp1, tmp2);

  ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
  VMReg fromReg;
  BasicType bt;
  Label L_null;
  while (stream.next(fromReg, bt)) {
    assert(fromReg->is_valid(), "source must be valid");
    reg_state[fromReg->value()] = reg_writable;

    int off = sig->at(stream.sig_index())._offset;
    if (off == -1) {
      // Nullable inline type argument, emit null check
      Label L_notNull;
      if (fromReg->is_stack()) {
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
        ldrb(tmp2, Address(sp, ld_off));
        cbnz(tmp2, L_notNull);
      } else {
        cbnz(fromReg->as_Register(), L_notNull);
      }
      mov(val_obj, 0);
      b(L_null);
      bind(L_notNull);
      continue;
    }

    assert(off > 0, "offset in object should be positive");
    size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;

    // Pack the scalarized field into the value object.
    Address dst(val_obj, off);

    if (!fromReg->is_FloatRegister()) {
      Register src;
      if (fromReg->is_stack()) {
        src = from_reg_tmp;
        int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
        load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
      } else {
        src = fromReg->as_Register();
      }
      assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
      if (is_reference_type(bt)) {
        store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        store_sized_value(dst, src, size_in_bytes);
      }
    } else if (bt == T_DOUBLE) {
      strd(fromReg->as_FloatRegister(), dst);
    } else {
      assert(bt == T_FLOAT, "must be float");
      strs(fromReg->as_FloatRegister(), dst);
    }
  }
  bind(L_null);
  sig_index = stream.sig_index();
  from_index = stream.regs_index();

  assert(reg_state[to->value()] == reg_writable, "must have already been read");
  bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
  assert(success, "to register must be writeable");

  return true;
}
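// Scratch locations used when the argument shuffling code above needs to
// temporarily spill a value; this is why r14 must be treated with care in
// unpack_inline_helper/pack_inline_helper (see the comments there).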
VMReg MacroAssembler::spill_reg_for(VMReg reg) {
  return (reg->is_FloatRegister()) ? v8->as_VMReg() : r14->as_VMReg();
}

void MacroAssembler::cache_wb(Address line) {
  assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
  assert(line.index() == noreg, "index should be noreg");
  assert(line.offset() == 0, "offset should be 0");
  // would like to assert this
  // assert(line._ext.shift == 0, "shift should be zero");
  if (VM_Version::supports_dcpop()) {
    // writeback using clean virtual address to point of persistence
    dc(Assembler::CVAP, line.base());
  } else {
    // no need to generate anything as Unsafe.writebackMemory should
    // never invoke this stub
  }
}

void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier post sync
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}

void MacroAssembler::verify_sve_vector_length(Register tmp) {
  // Make sure that native code does not change SVE vector length.
  if (!UseSVE) return;
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}

void MacroAssembler::safepoint_isb() {
  isb();
#ifndef PRODUCT
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
#endif
}

#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
#endif

void MacroAssembler::spin_wait() {
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
    case SpinWait::NOP:
      nop();
      break;
    case SpinWait::ISB:
      isb();
      break;
    case SpinWait::YIELD:
      yield();
      break;
    default:
      ShouldNotReachHere();
    }
  }
}

// Stack frame creation/removal
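// enter() establishes the standard frame layout: it saves the caller's rfp
// and the (possibly PAC-signed) lr below the current sp and points rfp at
// that save area; leave() tears this down in reverse order and
// re-authenticates the return address when ROP protection is enabled.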
void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return address in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    paciza(return_reg);
  }
}

// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address() {
  if (VM_Version::use_rop_protection()) {
    autiaz();
    check_return_address();
  }
}

// Authenticate the return address in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autiza(return_reg);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
//
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

#ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used - i.e. when the program returns to the invalid LR. At this point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
#endif

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
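// For example (illustrative, with the usual 4-byte stack slots): an incoming
// argument in stack slot 2 maps to (2 + 4) * 4 = 24 bytes above rfp, where
// the extra 4 slots skip the saved rfp/lr pair of the current frame.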
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// On 64 bit we will store integer-like items to the stack as
// 64-bit items (AArch64 ABI) even though Java would only store
// 32 bits for a parameter. On 32 bit it will simply be 32 bits.
// So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }
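    // A handle is the address of the stack slot just written. If the oop is
    // null, the callee must receive a null handle instead, hence the
    // conditional select below.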
    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it; otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to do float reg to int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with ZF set.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - t1, t2: temporary registers, will be destroyed
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, hdr, t1, t2, rscratch1);
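  // The lock-stack is a small per-thread array of oops; lock_stack_top_offset
  // is a byte offset into the JavaThread, so the capacity check below
  // compares byte offsets rather than element counts.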
  // Check if we would have space on lock-stack for the object.
  ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(t1, (unsigned)LockStack::end_offset() - 1);
  br(Assembler::GT, slow);

  // Load (object->mark() | 1) into hdr
  orr(hdr, hdr, markWord::unlocked_value);
  if (EnableValhalla) {
    // Mask inline_type bit such that we go to the slow path if object is an inline type
    andr(hdr, hdr, ~((int) markWord::inline_type_bit_in_place));
  }

  // Clear lock-bits, into t2
  eor(t2, hdr, markWord::unlocked_value);
  // Try to swing header from unlocked to locked
  // Clobbers rscratch1 when UseLSE is false
  cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
          /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
  br(Assembler::NE, slow);

  // After successful lock, push object on lock-stack
  ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
  str(obj, Address(rthread, t1));
  addw(t1, t1, oopSize);
  strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with ZF set.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object
// - t1, t2: temporary registers
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(obj, hdr, t1, t2, rscratch1);

#ifdef ASSERT
  {
    // The following checks rely on the fact that LockStack is only ever modified by
    // its owning thread, even if the lock got inflated concurrently; removal of LockStack
    // entries after inflation will happen delayed in that case.

    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GT, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
  {
    // Check if the top of the lock-stack matches the unlocked object.
    Label tos_ok;
    subw(t1, t1, oopSize);
    ldr(t1, Address(rthread, t1));
    cmpoop(t1, obj);
    br(Assembler::EQ, tos_ok);
    STOP("Top of lock-stack does not match the unlocked object");
    bind(tos_ok);
  }
  {
    // Check that hdr is fast-locked.
    Label hdr_ok;
    tst(hdr, markWord::lock_mask_in_place);
    br(Assembler::EQ, hdr_ok);
    STOP("Header is not fast-locked");
    bind(hdr_ok);
  }
#endif

  // Load the new header (unlocked) into t1
  orr(t1, hdr, markWord::unlocked_value);

  // Try to swing header from locked to unlocked
  // Clobbers rscratch1 when UseLSE is false
  cmpxchg(obj, hdr, t1, Assembler::xword,
          /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
  br(Assembler::NE, slow);

  // After successful unlock, pop object from lock-stack
  ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(t1, t1, oopSize);
#ifdef ASSERT
  str(zr, Address(rthread, t1));
#endif
  strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}