/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInlineKlass.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//       strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//       strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
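// As a worked example of the dispatch above (illustrative encodings,
// not taken from generated code): "adrp x1, <page>" with a zero page
// offset encodes as 0x90000001; bits insn[30:25] are 0b001000, which
// selects the adr/adrp subcase, and insn[31] == 1 distinguishes adrp
// from adr. Similarly "b ." encodes as 0x14000000, whose insn[30:25]
// is 0b001010, the unconditional branch subcase.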
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
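  // Worked example for the immhi/immlo split above (values are
  // illustrative): if the target page is 0x12345 pages beyond the pc
  // page, offset == 0x12345; then offset_lo == (0x12345 & 3) == 1 goes
  // into bits 30:29 (immlo) and offset >> 2 == 0x48d1 into bits 23:5
  // (immhi). The CPU reassembles (immhi << 2) | immlo to recover the
  // page delta at execution time.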
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

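// For reference, the kind of three-instruction sequence that
// Patcher::immediate above patches (register and target value are
// illustrative only; see movptr()):
//   movz x8, #0x1234              // bits  0..15 of 0x7fff_abcd_1234
//   movk x8, #0xabcd, lsl #16     // bits 16..31
//   movk x8, #0x7fff, lsl #32     // bits 32..47
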
// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

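// Usage sketch for the decoder above (illustrative): recover the
// destination of an already-emitted instruction sequence without
// modifying it, e.g.
//   uint32_t insn = *(uint32_t*)insn_addr;
//   address dest = MacroAssembler::target_addr_for_insn(insn_addr, insn);
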
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

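// For reference, the approximate sequence safepoint_poll above emits
// for a return poll in an nmethod with acquire semantics (offset name
// is symbolic):
//   add   tmp, rthread, #<polling_word_offset>
//   ldar  tmp, [tmp]
//   cmp   sp, tmp
//   b.hi  slow_path
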
void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2GB (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

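// A note on the reach arithmetic in far_call above (numbers
// illustrative): a plain bl encodes a signed 26-bit word offset, i.e.
// +/-128 MB, so with e.g. a 240 MB code cache two blobs can be out of
// range of each other. The ADRP/ADD pair instead materializes the
// target as a page-relative address (+/-4 GB reach) plus a 12-bit
// in-page offset, which the code cache size assert above keeps safe.
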
int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far jump not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2GB (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check that the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

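// A simplified sketch of the out-of-range case above: the bl is
// emitted against pc() as a placeholder, and relocation processing
// later redirects it, either straight to the destination when it is in
// range, or to the trampoline stub associated with the call site,
// which in turn reaches the real target via an ldr/br pair with
// unlimited range.
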
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

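// For example (illustrative values): with x == 0x100, the tst(x, 0xff)
// in c2bool above finds a zero low byte, so the result is 0 even
// though x is nonzero as a 64-bit value; with x == 0x01 the result is
// 1. Only the least-significant byte matters, matching one-byte
// C-style booleans.
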
address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * 7;
  } else {
    return NativeInstruction::instruction_size * 5;
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

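// Worked example for the pre-alignment in ic_check above (numbers
// illustrative): if offset() == 0x104, end_alignment == 16 and
// ic_check_size() == 20 bytes, then align(16, 0x104 + 20) emits nops
// until offset() + 20 is a multiple of 16, so the verified entry point
// right after the check lands aligned while the UEP executes no nops.
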
// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at the byte offset `target` into the code
// buffer (the same kind of offset that offset() returns) will be
// aligned according to modulus; pad with nops as needed.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
  assert_different_registers(inline_klass, temp_reg, obj, rscratch2);
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_inline_type(inline_klass, temp_reg, done_check);
    stop("get_default_value_oop from non inline type klass");
    bind(done_check);
  }
#endif
  Register offset = temp_reg;
  // Getting the offset of the pre-allocated default value
  ldr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
  load_sized_value(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())), sizeof(int), true /*is_signed*/);

  // Getting the mirror
  ldr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
  resolve_oop_handle(obj, inline_klass, rscratch2);

  // Getting the pre-allocated default value from the mirror
  Address field(obj, offset);
  load_heap_oop(obj, field, inline_klass, rscratch2);
}

void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
    stop("get_empty_value from non-empty inline klass");
    bind(done_check);
  }
#endif
  get_default_value_oop(inline_klass, temp_reg, obj);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
    - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
1505 if (L_failure == &L_fallthrough) { 1506 br(Assembler::EQ, *L_success); 1507 } else { 1508 br(Assembler::NE, *L_failure); 1509 final_jmp(*L_success); 1510 } 1511 } 1512 1513 bind(L_fallthrough); 1514 1515 #undef final_jmp 1516 } 1517 1518 // These two are taken from x86, but they look generally useful 1519 1520 // scans count pointer sized words at [addr] for occurrence of value, 1521 // generic 1522 void MacroAssembler::repne_scan(Register addr, Register value, Register count, 1523 Register scratch) { 1524 Label Lloop, Lexit; 1525 cbz(count, Lexit); 1526 bind(Lloop); 1527 ldr(scratch, post(addr, wordSize)); 1528 cmp(value, scratch); 1529 br(EQ, Lexit); 1530 sub(count, count, 1); 1531 cbnz(count, Lloop); 1532 bind(Lexit); 1533 } 1534 1535 // scans count 4 byte words at [addr] for occurrence of value, 1536 // generic 1537 void MacroAssembler::repne_scanw(Register addr, Register value, Register count, 1538 Register scratch) { 1539 Label Lloop, Lexit; 1540 cbz(count, Lexit); 1541 bind(Lloop); 1542 ldrw(scratch, post(addr, wordSize)); 1543 cmpw(value, scratch); 1544 br(EQ, Lexit); 1545 sub(count, count, 1); 1546 cbnz(count, Lloop); 1547 bind(Lexit); 1548 } 1549 1550 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 1551 Register super_klass, 1552 Register temp_reg, 1553 Register temp2_reg, 1554 Label* L_success, 1555 Label* L_failure, 1556 bool set_cond_codes) { 1557 // NB! Callers may assume that, when temp2_reg is a valid register, 1558 // this code sets it to a nonzero value. 1559 1560 assert_different_registers(sub_klass, super_klass, temp_reg); 1561 if (temp2_reg != noreg) 1562 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1); 1563 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 1564 1565 Label L_fallthrough; 1566 int label_nulls = 0; 1567 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1568 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1569 assert(label_nulls <= 1, "at most one null in the batch"); 1570 1571 // a couple of useful fields in sub_klass: 1572 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 1573 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1574 Address secondary_supers_addr(sub_klass, ss_offset); 1575 Address super_cache_addr( sub_klass, sc_offset); 1576 1577 BLOCK_COMMENT("check_klass_subtype_slow_path"); 1578 1579 // Do a linear scan of the secondary super-klass chain. 1580 // This code is rarely used, so simplicity is a virtue here. 1581 // The repne_scan instruction uses fixed registers, which we must spill. 1582 // Don't worry too much about pre-existing connections with the input regs. 1583 1584 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super) 1585 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) 1586 1587 RegSet pushed_registers; 1588 if (!IS_A_TEMP(r2)) pushed_registers += r2; 1589 if (!IS_A_TEMP(r5)) pushed_registers += r5; 1590 1591 if (super_klass != r0) { 1592 if (!IS_A_TEMP(r0)) pushed_registers += r0; 1593 } 1594 1595 push(pushed_registers, sp); 1596 1597 // Get super_klass value into r0 (even if it was in r5 or r2). 1598 if (super_klass != r0) { 1599 mov(r0, super_klass); 1600 } 1601 1602 #ifndef PRODUCT 1603 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); 1604 #endif //PRODUCT 1605 1606 // We will consult the secondary-super array. 1607 ldr(r5, secondary_supers_addr); 1608 // Load the array length. 
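  // (The length is the 32-bit field at the very start of the Array<Klass*>;
  // the Klass* data words follow it, hence the "skip to start of data"
  // adjustment just below.)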
1609 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1610 // Skip to start of data. 1611 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1612 1613 cmp(sp, zr); // Clear Z flag; SP is never zero 1614 // Scan R2 words at [R5] for an occurrence of R0. 1615 // Set NZ/Z based on last compare. 1616 repne_scan(r5, r0, r2, rscratch1); 1617 1618 // Unspill the temp. registers: 1619 pop(pushed_registers, sp); 1620 1621 br(Assembler::NE, *L_failure); 1622 1623 // Success. Cache the super we found and proceed in triumph. 1624 str(super_klass, super_cache_addr); 1625 1626 if (L_success != &L_fallthrough) { 1627 b(*L_success); 1628 } 1629 1630 #undef IS_A_TEMP 1631 1632 bind(L_fallthrough); 1633 } 1634 1635 // Ensure that the inline code and the stub are using the same registers. 1636 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1637 do { \ 1638 assert(r_super_klass == r0 && \ 1639 r_array_base == r1 && \ 1640 r_array_length == r2 && \ 1641 (r_array_index == r3 || r_array_index == noreg) && \ 1642 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1643 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1644 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1645 } while(0) 1646 1647 // Return true: we succeeded in generating this code 1648 bool MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, 1649 Register r_super_klass, 1650 Register temp1, 1651 Register temp2, 1652 Register temp3, 1653 FloatRegister vtemp, 1654 Register result, 1655 u1 super_klass_slot, 1656 bool stub_is_near) { 1657 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1658 1659 Label L_fallthrough; 1660 1661 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1662 1663 const Register 1664 r_array_base = temp1, // r1 1665 r_array_length = temp2, // r2 1666 r_array_index = temp3, // r3 1667 r_bitmap = rscratch2; 1668 1669 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1670 1671 u1 bit = super_klass_slot; 1672 1673 // Make sure that result is nonzero if the TBZ below misses. 1674 mov(result, 1); 1675 1676 // We're going to need the bitmap in a vector reg and in a core reg, 1677 // so load both now. 1678 ldr(r_bitmap, Address(r_sub_klass, Klass::bitmap_offset())); 1679 if (bit != 0) { 1680 ldrd(vtemp, Address(r_sub_klass, Klass::bitmap_offset())); 1681 } 1682 // First check the bitmap to see if super_klass might be present. If 1683 // the bit is zero, we are certain that super_klass is not one of 1684 // the secondary supers. 1685 tbz(r_bitmap, bit, L_fallthrough); 1686 1687 // Get the first array index that can contain super_klass into r_array_index. 1688 if (bit != 0) { 1689 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1690 cnt(vtemp, T8B, vtemp); 1691 addv(vtemp, T8B, vtemp); 1692 fmovd(r_array_index, vtemp); 1693 } else { 1694 mov(r_array_index, (u1)1); 1695 } 1696 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1697 1698 // We will consult the secondary-super array. 1699 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1700 1701 // The value i in r_array_index is >= 1, so even though r_array_base 1702 // points to the length, we don't need to adjust it to point to the 1703 // data. 
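  // (A worked example of the indexing scheme, with illustrative values:
  // for bit == 5 and bitmap bits 0, 3 and 5 set, the shift left by
  // SECONDARY_SUPERS_TABLE_MASK - bit == 58 discards all bits above 5,
  // the popcount (cnt/addv) gives r_array_index == 3, and the load below
  // reads r_array_base[3], i.e. secondary_supers[2], because r_array_base
  // still points one word before the data.)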
1704 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1705 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1706 1707 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1708 eor(result, result, r_super_klass); 1709 cbz(result, L_fallthrough); // Found a match 1710 1711 // Is there another entry to check? Consult the bitmap. 1712 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1713 1714 // Linear probe. 1715 if (bit != 0) { 1716 ror(r_bitmap, r_bitmap, bit); 1717 } 1718 1719 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1720 // The next slot to be inspected, by the stub we're about to call, 1721 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1722 // have been checked. 1723 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()); 1724 if (stub_is_near) { 1725 bl(stub); 1726 } else { 1727 address call = trampoline_call(stub); 1728 if (call == nullptr) { 1729 return false; // trampoline allocation failed 1730 } 1731 } 1732 1733 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1734 1735 bind(L_fallthrough); 1736 1737 if (VerifySecondarySupers) { 1738 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1739 temp1, temp2, result); // r1, r2, r5 1740 } 1741 return true; 1742 } 1743 1744 // Called by code generated by check_klass_subtype_slow_path 1745 // above. This is called when there is a collision in the hashed 1746 // lookup in the secondary supers array. 1747 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 1748 Register r_array_base, 1749 Register r_array_index, 1750 Register r_bitmap, 1751 Register temp1, 1752 Register result) { 1753 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1); 1754 1755 const Register 1756 r_array_length = temp1, 1757 r_sub_klass = noreg; // unused 1758 1759 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1760 1761 Label L_fallthrough, L_huge; 1762 1763 // Load the array length. 1764 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1765 // And adjust the array base to point to the data. 1766 // NB! Effectively increments current slot index by 1. 1767 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 1768 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1769 1770 // The bitmap is full to bursting. 1771 // Implicit invariant: BITMAP_FULL implies (length > 0) 1772 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); 1773 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2)); 1774 br(GT, L_huge); 1775 1776 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 1777 // current slot (at secondary_supers[r_array_index]) has not yet 1778 // been inspected, and r_array_index may be out of bounds if we 1779 // wrapped around the end of the array. 1780 1781 { // This is conventional linear probing, but instead of terminating 1782 // when a null entry is found in the table, we maintain a bitmap 1783 // in which a 0 indicates missing entries. 1784 // The check above guarantees there are 0s in the bitmap, so the loop 1785 // eventually terminates. 1786 Label L_loop; 1787 bind(L_loop); 1788 1789 // Check for wraparound. 
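    // (The csel below wraps the index to zero once it reaches
    // r_array_length; GE rather than EQ also covers an index that was
    // already out of bounds when we entered the loop.)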
1790 cmp(r_array_index, r_array_length); 1791 csel(r_array_index, zr, r_array_index, GE); 1792 1793 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1794 eor(result, rscratch1, r_super_klass); 1795 cbz(result, L_fallthrough); 1796 1797 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero 1798 1799 ror(r_bitmap, r_bitmap, 1); 1800 add(r_array_index, r_array_index, 1); 1801 b(L_loop); 1802 } 1803 1804 { // Degenerate case: more than 64 secondary supers. 1805 // FIXME: We could do something smarter here, maybe a vectorized 1806 // comparison or a binary search, but is that worth any added 1807 // complexity? 1808 bind(L_huge); 1809 cmp(sp, zr); // Clear Z flag; SP is never zero 1810 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1); 1811 cset(result, NE); // result == 0 iff we got a match. 1812 } 1813 1814 bind(L_fallthrough); 1815 } 1816 1817 // Make sure that the hashed lookup and a linear scan agree. 1818 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 1819 Register r_super_klass, 1820 Register temp1, 1821 Register temp2, 1822 Register result) { 1823 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1); 1824 1825 const Register 1826 r_array_base = temp1, 1827 r_array_length = temp2, 1828 r_array_index = noreg, // unused 1829 r_bitmap = noreg; // unused 1830 1831 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1832 1833 BLOCK_COMMENT("verify_secondary_supers_table {"); 1834 1835 // We will consult the secondary-super array. 1836 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1837 1838 // Load the array length. 1839 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1840 // And adjust the array base to point to the data. 1841 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1842 1843 cmp(sp, zr); // Clear Z flag; SP is never zero 1844 // Scan R2 words at [R5] for an occurrence of R0. 1845 // Set NZ/Z based on last compare. 1846 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2); 1847 // rscratch1 == 0 iff we got a match. 
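  // (NE here means repne_scan ran off the end of the array without
  // finding r_super_klass, so the cset below produces 1 for "not found".)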
1848 cset(rscratch1, NE); 1849 1850 Label passed; 1851 cmp(result, zr); 1852 cset(result, NE); // normalize result to 0/1 for comparison 1853 1854 cmp(rscratch1, result); 1855 br(EQ, passed); 1856 { 1857 mov(r0, r_super_klass); // r0 <- r0 1858 mov(r1, r_sub_klass); // r1 <- r4 1859 mov(r2, /*expected*/rscratch1); // r2 <- r8 1860 mov(r3, result); // r3 <- r5 1861 mov(r4, (address)("mismatch")); // r4 <- const 1862 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 1863 should_not_reach_here(); 1864 } 1865 bind(passed); 1866 1867 BLOCK_COMMENT("} verify_secondary_supers_table"); 1868 } 1869 1870 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 1871 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 1872 assert_different_registers(klass, rthread, scratch); 1873 1874 Label L_fallthrough, L_tmp; 1875 if (L_fast_path == nullptr) { 1876 L_fast_path = &L_fallthrough; 1877 } else if (L_slow_path == nullptr) { 1878 L_slow_path = &L_fallthrough; 1879 } 1880 // Fast path check: class is fully initialized 1881 lea(scratch, Address(klass, InstanceKlass::init_state_offset())); 1882 ldarb(scratch, scratch); 1883 subs(zr, scratch, InstanceKlass::fully_initialized); 1884 br(Assembler::EQ, *L_fast_path); 1885 1886 // Fast path check: current thread is initializer thread 1887 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 1888 cmp(rthread, scratch); 1889 1890 if (L_slow_path == &L_fallthrough) { 1891 br(Assembler::EQ, *L_fast_path); 1892 bind(*L_slow_path); 1893 } else if (L_fast_path == &L_fallthrough) { 1894 br(Assembler::NE, *L_slow_path); 1895 bind(*L_fast_path); 1896 } else { 1897 Unimplemented(); 1898 } 1899 } 1900 1901 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 1902 if (!VerifyOops || VerifyAdapterSharing) { 1903 // Below address of the code string confuses VerifyAdapterSharing 1904 // because it may differ between otherwise equivalent adapters. 1905 return; 1906 } 1907 1908 // Pass register number to verify_oop_subroutine 1909 const char* b = nullptr; 1910 { 1911 ResourceMark rm; 1912 stringStream ss; 1913 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 1914 b = code_string(ss.as_string()); 1915 } 1916 BLOCK_COMMENT("verify_oop {"); 1917 1918 strip_return_address(); // This might happen within a stack frame. 1919 protect_return_address(); 1920 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1921 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1922 1923 mov(r0, reg); 1924 movptr(rscratch1, (uintptr_t)(address)b); 1925 1926 // call indirectly to solve generation ordering problem 1927 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1928 ldr(rscratch2, Address(rscratch2)); 1929 blr(rscratch2); 1930 1931 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1932 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1933 authenticate_return_address(); 1934 1935 BLOCK_COMMENT("} verify_oop"); 1936 } 1937 1938 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 1939 if (!VerifyOops || VerifyAdapterSharing) { 1940 // Below address of the code string confuses VerifyAdapterSharing 1941 // because it may differ between otherwise equivalent adapters. 
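    // (Returning early keeps otherwise equivalent adapters byte-identical,
    // which is presumably what VerifyAdapterSharing compares.)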
1942 return; 1943 } 1944 1945 const char* b = nullptr; 1946 { 1947 ResourceMark rm; 1948 stringStream ss; 1949 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 1950 b = code_string(ss.as_string()); 1951 } 1952 BLOCK_COMMENT("verify_oop_addr {"); 1953 1954 strip_return_address(); // This might happen within a stack frame. 1955 protect_return_address(); 1956 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1957 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1958 1959 // addr may contain sp so we will have to adjust it based on the 1960 // pushes that we just did. 1961 if (addr.uses(sp)) { 1962 lea(r0, addr); 1963 ldr(r0, Address(r0, 4 * wordSize)); 1964 } else { 1965 ldr(r0, addr); 1966 } 1967 movptr(rscratch1, (uintptr_t)(address)b); 1968 1969 // call indirectly to solve generation ordering problem 1970 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1971 ldr(rscratch2, Address(rscratch2)); 1972 blr(rscratch2); 1973 1974 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1975 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1976 authenticate_return_address(); 1977 1978 BLOCK_COMMENT("} verify_oop_addr"); 1979 } 1980 1981 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1982 int extra_slot_offset) { 1983 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 1984 int stackElementSize = Interpreter::stackElementSize; 1985 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1986 #ifdef ASSERT 1987 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1988 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1989 #endif 1990 if (arg_slot.is_constant()) { 1991 return Address(esp, arg_slot.as_constant() * stackElementSize 1992 + offset); 1993 } else { 1994 add(rscratch1, esp, arg_slot.as_register(), 1995 ext::uxtx, exact_log2(stackElementSize)); 1996 return Address(rscratch1, offset); 1997 } 1998 } 1999 2000 void MacroAssembler::call_VM_leaf_base(address entry_point, 2001 int number_of_arguments, 2002 Label *retaddr) { 2003 Label E, L; 2004 2005 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 2006 2007 mov(rscratch1, entry_point); 2008 blr(rscratch1); 2009 if (retaddr) 2010 bind(*retaddr); 2011 2012 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 2013 } 2014 2015 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 2016 call_VM_leaf_base(entry_point, number_of_arguments); 2017 } 2018 2019 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2020 pass_arg0(this, arg_0); 2021 call_VM_leaf_base(entry_point, 1); 2022 } 2023 2024 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2025 assert_different_registers(arg_1, c_rarg0); 2026 pass_arg0(this, arg_0); 2027 pass_arg1(this, arg_1); 2028 call_VM_leaf_base(entry_point, 2); 2029 } 2030 2031 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2032 Register arg_1, Register arg_2) { 2033 assert_different_registers(arg_1, c_rarg0); 2034 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2035 pass_arg0(this, arg_0); 2036 pass_arg1(this, arg_1); 2037 pass_arg2(this, arg_2); 2038 call_VM_leaf_base(entry_point, 3); 2039 } 2040 2041 void MacroAssembler::super_call_VM_leaf(address entry_point) { 2042 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2043 } 2044 2045 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2046 pass_arg0(this, arg_0); 2047 
MacroAssembler::call_VM_leaf_base(entry_point, 1); 2048 } 2049 2050 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2051 2052 assert_different_registers(arg_0, c_rarg1); 2053 pass_arg1(this, arg_1); 2054 pass_arg0(this, arg_0); 2055 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2056 } 2057 2058 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2059 assert_different_registers(arg_0, c_rarg1, c_rarg2); 2060 assert_different_registers(arg_1, c_rarg2); 2061 pass_arg2(this, arg_2); 2062 pass_arg1(this, arg_1); 2063 pass_arg0(this, arg_0); 2064 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2065 } 2066 2067 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2068 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2069 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2070 assert_different_registers(arg_2, c_rarg3); 2071 pass_arg3(this, arg_3); 2072 pass_arg2(this, arg_2); 2073 pass_arg1(this, arg_1); 2074 pass_arg0(this, arg_0); 2075 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2076 } 2077 2078 void MacroAssembler::null_check(Register reg, int offset) { 2079 if (needs_explicit_null_check(offset)) { 2080 // provoke OS null exception if reg is null by 2081 // accessing M[reg] w/o changing any registers 2082 // NOTE: this is plenty to provoke a segv 2083 ldr(zr, Address(reg)); 2084 } else { 2085 // nothing to do, (later) access of M[reg + offset] 2086 // will provoke OS null exception if reg is null 2087 } 2088 } 2089 2090 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) { 2091 assert_different_registers(markword, rscratch2); 2092 andr(markword, markword, markWord::inline_type_mask_in_place); 2093 mov(rscratch2, markWord::inline_type_pattern); 2094 cmp(markword, rscratch2); 2095 br(Assembler::EQ, is_inline_type); 2096 } 2097 2098 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) { 2099 ldrw(temp_reg, Address(klass, Klass::access_flags_offset())); 2100 andr(temp_reg, temp_reg, JVM_ACC_IDENTITY); 2101 cbz(temp_reg, is_inline_type); 2102 } 2103 2104 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) { 2105 assert_different_registers(tmp, rscratch1); 2106 cbz(object, not_inline_type); 2107 const int is_inline_type_mask = markWord::inline_type_pattern; 2108 ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes())); 2109 mov(rscratch1, is_inline_type_mask); 2110 andr(tmp, tmp, rscratch1); 2111 cmp(tmp, rscratch1); 2112 br(Assembler::NE, not_inline_type); 2113 } 2114 2115 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) { 2116 #ifdef ASSERT 2117 { 2118 Label done_check; 2119 test_klass_is_inline_type(klass, temp_reg, done_check); 2120 stop("test_klass_is_empty_inline_type with non inline type klass"); 2121 bind(done_check); 2122 } 2123 #endif 2124 ldrw(temp_reg, Address(klass, InstanceKlass::misc_flags_offset())); 2125 andr(temp_reg, temp_reg, InstanceKlassFlags::is_empty_inline_type_value()); 2126 cbnz(temp_reg, is_empty_inline_type); 2127 } 2128 2129 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) { 2130 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 2131 tbnz(flags, 
ResolvedFieldEntry::is_null_free_inline_type_shift, is_null_free_inline_type); 2132 } 2133 2134 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) { 2135 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 2136 tbz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, not_null_free_inline_type); 2137 } 2138 2139 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) { 2140 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 2141 tbnz(flags, ResolvedFieldEntry::is_flat_shift, is_flat); 2142 } 2143 2144 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) { 2145 assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86 2146 tbnz(flags, ResolvedFieldEntry::has_null_marker_shift, has_null_marker); 2147 } 2148 2149 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) { 2150 Label test_mark_word; 2151 // load mark word 2152 ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes())); 2153 // check displaced 2154 tst(temp_reg, markWord::unlocked_value); 2155 br(Assembler::NE, test_mark_word); 2156 // slow path use klass prototype 2157 load_prototype_header(temp_reg, oop); 2158 2159 bind(test_mark_word); 2160 andr(temp_reg, temp_reg, test_bit); 2161 if (jmp_set) { 2162 cbnz(temp_reg, jmp_label); 2163 } else { 2164 cbz(temp_reg, jmp_label); 2165 } 2166 } 2167 2168 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) { 2169 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array); 2170 } 2171 2172 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg, 2173 Label&is_non_flat_array) { 2174 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array); 2175 } 2176 2177 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) { 2178 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array); 2179 } 2180 2181 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) { 2182 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array); 2183 } 2184 2185 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) { 2186 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 2187 br(Assembler::NE, is_flat_array); 2188 } 2189 2190 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) { 2191 tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 2192 br(Assembler::EQ, is_non_flat_array); 2193 } 2194 2195 // MacroAssembler protected routines needed to implement 2196 // public methods 2197 2198 void MacroAssembler::mov(Register r, Address dest) { 2199 code_section()->relocate(pc(), dest.rspec()); 2200 uint64_t imm64 = (uint64_t)dest.target(); 2201 movptr(r, imm64); 2202 } 2203 2204 // Move a constant pointer into r. In AArch64 mode the virtual 2205 // address space is 48 bits in size, so we only need three 2206 // instructions to create a patchable instruction sequence that can 2207 // reach anywhere. 
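//
// A sketch of the emitted sequence for a hypothetical 48-bit target
// 0x0000_00de_dead_beef (the constant is illustrative only):
//
//   movz r, #0xbeef             // bits  0..15
//   movk r, #0xdead, lsl #16    // bits 16..31
//   movk r, #0x00de, lsl #32    // bits 32..47
//
// Each instruction carries a 16-bit immediate, so a patcher can rewrite
// the target in place without changing the length of the sequence.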
2208 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2209 #ifndef PRODUCT 2210 { 2211 char buffer[64]; 2212 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2213 block_comment(buffer); 2214 } 2215 #endif 2216 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2217 movz(r, imm64 & 0xffff); 2218 imm64 >>= 16; 2219 movk(r, imm64 & 0xffff, 16); 2220 imm64 >>= 16; 2221 movk(r, imm64 & 0xffff, 32); 2222 } 2223 2224 // Macro to mov replicated immediate to vector register. 2225 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2226 // the upper 56/48/32 bits must be zeros for B/H/S type. 2227 // Vd will get the following values for different arrangements in T 2228 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2229 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2230 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2231 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2232 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2233 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2234 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2235 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2236 // Clobbers rscratch1 2237 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2238 assert(T != T1Q, "unsupported"); 2239 if (T == T1D || T == T2D) { 2240 int imm = operand_valid_for_movi_immediate(imm64, T); 2241 if (-1 != imm) { 2242 movi(Vd, T, imm); 2243 } else { 2244 mov(rscratch1, imm64); 2245 dup(Vd, T, rscratch1); 2246 } 2247 return; 2248 } 2249 2250 #ifdef ASSERT 2251 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2252 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2253 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2254 #endif 2255 int shift = operand_valid_for_movi_immediate(imm64, T); 2256 uint32_t imm32 = imm64 & 0xffffffffULL; 2257 if (shift >= 0) { 2258 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2259 } else { 2260 movw(rscratch1, imm32); 2261 dup(Vd, T, rscratch1); 2262 } 2263 } 2264 2265 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2266 { 2267 #ifndef PRODUCT 2268 { 2269 char buffer[64]; 2270 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2271 block_comment(buffer); 2272 } 2273 #endif 2274 if (operand_valid_for_logical_immediate(false, imm64)) { 2275 orr(dst, zr, imm64); 2276 } else { 2277 // we can use a combination of MOVZ or MOVN with 2278 // MOVK to build up the constant 2279 uint64_t imm_h[4]; 2280 int zero_count = 0; 2281 int neg_count = 0; 2282 int i; 2283 for (i = 0; i < 4; i++) { 2284 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2285 if (imm_h[i] == 0) { 2286 zero_count++; 2287 } else if (imm_h[i] == 0xffffL) { 2288 neg_count++; 2289 } 2290 } 2291 if (zero_count == 4) { 2292 // one MOVZ will do 2293 movz(dst, 0); 2294 } else if (neg_count == 4) { 2295 // one MOVN will do 2296 movn(dst, 0); 2297 } else if (zero_count == 3) { 2298 for (i = 0; i < 4; i++) { 2299 if (imm_h[i] != 0L) { 2300 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2301 break; 2302 } 2303 } 2304 } else if (neg_count == 3) { 2305 // one MOVN will do 2306 for (int i = 0; i < 4; i++) { 2307 if (imm_h[i] != 0xffffL) { 2308 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2309 break; 2310 } 2311 } 2312 } else if (zero_count == 2) { 2313 // one MOVZ and one MOVK will do 2314 for (i = 0; i < 3; 
i++) { 2315 if (imm_h[i] != 0L) { 2316 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2317 i++; 2318 break; 2319 } 2320 } 2321 for (;i < 4; i++) { 2322 if (imm_h[i] != 0L) { 2323 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2324 } 2325 } 2326 } else if (neg_count == 2) { 2327 // one MOVN and one MOVK will do 2328 for (i = 0; i < 4; i++) { 2329 if (imm_h[i] != 0xffffL) { 2330 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2331 i++; 2332 break; 2333 } 2334 } 2335 for (;i < 4; i++) { 2336 if (imm_h[i] != 0xffffL) { 2337 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2338 } 2339 } 2340 } else if (zero_count == 1) { 2341 // one MOVZ and two MOVKs will do 2342 for (i = 0; i < 4; i++) { 2343 if (imm_h[i] != 0L) { 2344 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2345 i++; 2346 break; 2347 } 2348 } 2349 for (;i < 4; i++) { 2350 if (imm_h[i] != 0x0L) { 2351 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2352 } 2353 } 2354 } else if (neg_count == 1) { 2355 // one MOVN and two MOVKs will do 2356 for (i = 0; i < 4; i++) { 2357 if (imm_h[i] != 0xffffL) { 2358 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2359 i++; 2360 break; 2361 } 2362 } 2363 for (;i < 4; i++) { 2364 if (imm_h[i] != 0xffffL) { 2365 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2366 } 2367 } 2368 } else { 2369 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2370 movz(dst, (uint32_t)imm_h[0], 0); 2371 for (i = 1; i < 4; i++) { 2372 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2373 } 2374 } 2375 } 2376 } 2377 2378 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2379 { 2380 #ifndef PRODUCT 2381 { 2382 char buffer[64]; 2383 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2384 block_comment(buffer); 2385 } 2386 #endif 2387 if (operand_valid_for_logical_immediate(true, imm32)) { 2388 orrw(dst, zr, imm32); 2389 } else { 2390 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2391 // constant 2392 uint32_t imm_h[2]; 2393 imm_h[0] = imm32 & 0xffff; 2394 imm_h[1] = ((imm32 >> 16) & 0xffff); 2395 if (imm_h[0] == 0) { 2396 movzw(dst, imm_h[1], 16); 2397 } else if (imm_h[0] == 0xffff) { 2398 movnw(dst, imm_h[1] ^ 0xffff, 16); 2399 } else if (imm_h[1] == 0) { 2400 movzw(dst, imm_h[0], 0); 2401 } else if (imm_h[1] == 0xffff) { 2402 movnw(dst, imm_h[0] ^ 0xffff, 0); 2403 } else { 2404 // use a MOVZ and MOVK (makes it easier to debug) 2405 movzw(dst, imm_h[0], 0); 2406 movkw(dst, imm_h[1], 16); 2407 } 2408 } 2409 } 2410 2411 // Form an address from base + offset in Rd. Rd may or may 2412 // not actually be used: you must use the Address that is returned. 2413 // It is up to you to ensure that the shift provided matches the size 2414 // of your data. 
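//
// An illustrative case (not taken from a real caller): with shift == 3
// and byte_offset == 0x40010, the offset is too large for one scaled
// immediate, but word_offset == 0x8002 splits into masked_offset ==
// 0x8000 plus a remainder of 2, so a single
//   add Rd, base, #0x40000
// is emitted and Address(Rd, 0x10) is returned, avoiding the mov+add
// fallback at the end of the routine.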
2415 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2416 if (Address::offset_ok_for_immed(byte_offset, shift)) 2417 // It fits; no need for any heroics 2418 return Address(base, byte_offset); 2419 2420 // Don't do anything clever with negative or misaligned offsets 2421 unsigned mask = (1 << shift) - 1; 2422 if (byte_offset < 0 || byte_offset & mask) { 2423 mov(Rd, byte_offset); 2424 add(Rd, base, Rd); 2425 return Address(Rd); 2426 } 2427 2428 // See if we can do this with two 12-bit offsets 2429 { 2430 uint64_t word_offset = byte_offset >> shift; 2431 uint64_t masked_offset = word_offset & 0xfff000; 2432 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2433 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2434 add(Rd, base, masked_offset << shift); 2435 word_offset -= masked_offset; 2436 return Address(Rd, word_offset << shift); 2437 } 2438 } 2439 2440 // Do it the hard way 2441 mov(Rd, byte_offset); 2442 add(Rd, base, Rd); 2443 return Address(Rd); 2444 } 2445 2446 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2447 bool want_remainder, Register scratch) 2448 { 2449 // Full implementation of Java idiv and irem. The function 2450 // returns the (pc) offset of the div instruction - may be needed 2451 // for implicit exceptions. 2452 // 2453 // constraint : ra/rb =/= scratch 2454 // normal case 2455 // 2456 // input : ra: dividend 2457 // rb: divisor 2458 // 2459 // result: either 2460 // quotient (= ra idiv rb) 2461 // remainder (= ra irem rb) 2462 2463 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2464 2465 int idivl_offset = offset(); 2466 if (! want_remainder) { 2467 sdivw(result, ra, rb); 2468 } else { 2469 sdivw(scratch, ra, rb); 2470 Assembler::msubw(result, scratch, rb, ra); 2471 } 2472 2473 return idivl_offset; 2474 } 2475 2476 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 2477 bool want_remainder, Register scratch) 2478 { 2479 // Full implementation of Java ldiv and lrem. The function 2480 // returns the (pc) offset of the div instruction - may be needed 2481 // for implicit exceptions. 2482 // 2483 // constraint : ra/rb =/= scratch 2484 // normal case 2485 // 2486 // input : ra: dividend 2487 // rb: divisor 2488 // 2489 // result: either 2490 // quotient (= ra idiv rb) 2491 // remainder (= ra irem rb) 2492 2493 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 2494 2495 int idivq_offset = offset(); 2496 if (! want_remainder) { 2497 sdiv(result, ra, rb); 2498 } else { 2499 sdiv(scratch, ra, rb); 2500 Assembler::msub(result, scratch, rb, ra); 2501 } 2502 2503 return idivq_offset; 2504 } 2505 2506 void MacroAssembler::membar(Membar_mask_bits order_constraint) { 2507 address prev = pc() - NativeMembar::instruction_size; 2508 address last = code()->last_insn(); 2509 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { 2510 NativeMembar *bar = NativeMembar_at(prev); 2511 if (AlwaysMergeDMB) { 2512 bar->set_kind(bar->get_kind() | order_constraint); 2513 BLOCK_COMMENT("merged membar(always)"); 2514 return; 2515 } 2516 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because 2517 // doing so would introduce a StoreLoad which the caller did not 2518 // intend 2519 if (bar->get_kind() == order_constraint 2520 || bar->get_kind() == AnyAny 2521 || order_constraint == AnyAny) { 2522 // We are merging two memory barrier instructions. 
      // On AArch64 we can do this simply by ORing them together.
      bar->set_kind(bar->get_kind() | order_constraint);
      BLOCK_COMMENT("merged membar");
      return;
    } else {
      // In a special case like "DMB ST; DMB LD; DMB ST" the last DMB can be
      // skipped; we need to check the last two instructions.
      address prev2 = prev - NativeMembar::instruction_size;
      if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
        NativeMembar *bar2 = NativeMembar_at(prev2);
        assert(bar2->get_kind() == order_constraint, "it should be merged before");
        BLOCK_COMMENT("merged membar(elided)");
        return;
      }
    }
  }
  code()->set_last_insn(pc());
  dmb(Assembler::barrier(order_constraint));
}

bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
  if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
    merge_ldst(rt, adr, size_in_bytes, is_store);
    code()->clear_last_insn();
    return true;
  } else {
    assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
    const uint64_t mask = size_in_bytes - 1;
    if (adr.getMode() == Address::base_plus_offset &&
        (adr.offset() & mask) == 0) { // only supports base_plus_offset.
      code()->set_last_insn(pc());
    }
    return false;
  }
}

void MacroAssembler::ldr(Register Rx, const Address &adr) {
  // We always try to merge two adjacent loads into one ldp.
  if (!try_merge_ldst(Rx, adr, 8, false)) {
    Assembler::ldr(Rx, adr);
  }
}

void MacroAssembler::ldrw(Register Rw, const Address &adr) {
  // We always try to merge two adjacent loads into one ldp.
  if (!try_merge_ldst(Rw, adr, 4, false)) {
    Assembler::ldrw(Rw, adr);
  }
}

void MacroAssembler::str(Register Rx, const Address &adr) {
  // We always try to merge two adjacent stores into one stp.
  if (!try_merge_ldst(Rx, adr, 8, true)) {
    Assembler::str(Rx, adr);
  }
}

void MacroAssembler::strw(Register Rw, const Address &adr) {
  // We always try to merge two adjacent stores into one stp.
  if (!try_merge_ldst(Rw, adr, 4, true)) {
    Assembler::strw(Rw, adr);
  }
}

// MacroAssembler routines found actually to be needed

void MacroAssembler::push(Register src)
{
  str(src, Address(pre(esp, -1 * wordSize)));
}

void MacroAssembler::pop(Register dst)
{
  ldr(dst, Address(post(esp, 1 * wordSize)));
}

// Note: load_unsigned_short used to be called load_unsigned_word.
2600 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2601 int off = offset(); 2602 ldrh(dst, src); 2603 return off; 2604 } 2605 2606 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2607 int off = offset(); 2608 ldrb(dst, src); 2609 return off; 2610 } 2611 2612 int MacroAssembler::load_signed_short(Register dst, Address src) { 2613 int off = offset(); 2614 ldrsh(dst, src); 2615 return off; 2616 } 2617 2618 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2619 int off = offset(); 2620 ldrsb(dst, src); 2621 return off; 2622 } 2623 2624 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2625 int off = offset(); 2626 ldrshw(dst, src); 2627 return off; 2628 } 2629 2630 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2631 int off = offset(); 2632 ldrsbw(dst, src); 2633 return off; 2634 } 2635 2636 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2637 switch (size_in_bytes) { 2638 case 8: ldr(dst, src); break; 2639 case 4: ldrw(dst, src); break; 2640 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2641 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2642 default: ShouldNotReachHere(); 2643 } 2644 } 2645 2646 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2647 switch (size_in_bytes) { 2648 case 8: str(src, dst); break; 2649 case 4: strw(src, dst); break; 2650 case 2: strh(src, dst); break; 2651 case 1: strb(src, dst); break; 2652 default: ShouldNotReachHere(); 2653 } 2654 } 2655 2656 void MacroAssembler::decrementw(Register reg, int value) 2657 { 2658 if (value < 0) { incrementw(reg, -value); return; } 2659 if (value == 0) { return; } 2660 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2661 /* else */ { 2662 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2663 movw(rscratch2, (unsigned)value); 2664 subw(reg, reg, rscratch2); 2665 } 2666 } 2667 2668 void MacroAssembler::decrement(Register reg, int value) 2669 { 2670 if (value < 0) { increment(reg, -value); return; } 2671 if (value == 0) { return; } 2672 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2673 /* else */ { 2674 assert(reg != rscratch2, "invalid dst for register decrement"); 2675 mov(rscratch2, (uint64_t)value); 2676 sub(reg, reg, rscratch2); 2677 } 2678 } 2679 2680 void MacroAssembler::decrementw(Address dst, int value) 2681 { 2682 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2683 if (dst.getMode() == Address::literal) { 2684 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2685 lea(rscratch2, dst); 2686 dst = Address(rscratch2); 2687 } 2688 ldrw(rscratch1, dst); 2689 decrementw(rscratch1, value); 2690 strw(rscratch1, dst); 2691 } 2692 2693 void MacroAssembler::decrement(Address dst, int value) 2694 { 2695 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2696 if (dst.getMode() == Address::literal) { 2697 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2698 lea(rscratch2, dst); 2699 dst = Address(rscratch2); 2700 } 2701 ldr(rscratch1, dst); 2702 decrement(rscratch1, value); 2703 str(rscratch1, dst); 2704 } 2705 2706 void MacroAssembler::incrementw(Register reg, int value) 2707 { 2708 if (value < 0) { decrementw(reg, -value); return; } 2709 if (value == 0) { return; } 2710 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2711 /* 
else */ { 2712 assert(reg != rscratch2, "invalid dst for register increment"); 2713 movw(rscratch2, (unsigned)value); 2714 addw(reg, reg, rscratch2); 2715 } 2716 } 2717 2718 void MacroAssembler::increment(Register reg, int value) 2719 { 2720 if (value < 0) { decrement(reg, -value); return; } 2721 if (value == 0) { return; } 2722 if (value < (1 << 12)) { add(reg, reg, value); return; } 2723 /* else */ { 2724 assert(reg != rscratch2, "invalid dst for register increment"); 2725 movw(rscratch2, (unsigned)value); 2726 add(reg, reg, rscratch2); 2727 } 2728 } 2729 2730 void MacroAssembler::incrementw(Address dst, int value) 2731 { 2732 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2733 if (dst.getMode() == Address::literal) { 2734 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2735 lea(rscratch2, dst); 2736 dst = Address(rscratch2); 2737 } 2738 ldrw(rscratch1, dst); 2739 incrementw(rscratch1, value); 2740 strw(rscratch1, dst); 2741 } 2742 2743 void MacroAssembler::increment(Address dst, int value) 2744 { 2745 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2746 if (dst.getMode() == Address::literal) { 2747 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2748 lea(rscratch2, dst); 2749 dst = Address(rscratch2); 2750 } 2751 ldr(rscratch1, dst); 2752 increment(rscratch1, value); 2753 str(rscratch1, dst); 2754 } 2755 2756 // Push lots of registers in the bit set supplied. Don't push sp. 2757 // Return the number of words pushed 2758 int MacroAssembler::push(unsigned int bitset, Register stack) { 2759 int words_pushed = 0; 2760 2761 // Scan bitset to accumulate register pairs 2762 unsigned char regs[32]; 2763 int count = 0; 2764 for (int reg = 0; reg <= 30; reg++) { 2765 if (1 & bitset) 2766 regs[count++] = reg; 2767 bitset >>= 1; 2768 } 2769 regs[count++] = zr->raw_encoding(); 2770 count &= ~1; // Only push an even number of regs 2771 2772 if (count) { 2773 stp(as_Register(regs[0]), as_Register(regs[1]), 2774 Address(pre(stack, -count * wordSize))); 2775 words_pushed += 2; 2776 } 2777 for (int i = 2; i < count; i += 2) { 2778 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2779 Address(stack, i * wordSize)); 2780 words_pushed += 2; 2781 } 2782 2783 assert(words_pushed == count, "oops, pushed != count"); 2784 2785 return count; 2786 } 2787 2788 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2789 int words_pushed = 0; 2790 2791 // Scan bitset to accumulate register pairs 2792 unsigned char regs[32]; 2793 int count = 0; 2794 for (int reg = 0; reg <= 30; reg++) { 2795 if (1 & bitset) 2796 regs[count++] = reg; 2797 bitset >>= 1; 2798 } 2799 regs[count++] = zr->raw_encoding(); 2800 count &= ~1; 2801 2802 for (int i = 2; i < count; i += 2) { 2803 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2804 Address(stack, i * wordSize)); 2805 words_pushed += 2; 2806 } 2807 if (count) { 2808 ldp(as_Register(regs[0]), as_Register(regs[1]), 2809 Address(post(stack, count * wordSize))); 2810 words_pushed += 2; 2811 } 2812 2813 assert(words_pushed == count, "oops, pushed != count"); 2814 2815 return count; 2816 } 2817 2818 // Push lots of registers in the bit set supplied. Don't push sp. 
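// Bit i of the set selects FP register vi; e.g. a bitset of 0b101 (an
// illustrative value) saves v0 and v2. In the Neon and plain-FP modes an
// odd count is padded with one extra slot so sp stays 16-byte aligned.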
2819 // Return the number of dwords pushed 2820 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2821 int words_pushed = 0; 2822 bool use_sve = false; 2823 int sve_vector_size_in_bytes = 0; 2824 2825 #ifdef COMPILER2 2826 use_sve = Matcher::supports_scalable_vector(); 2827 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2828 #endif 2829 2830 // Scan bitset to accumulate register pairs 2831 unsigned char regs[32]; 2832 int count = 0; 2833 for (int reg = 0; reg <= 31; reg++) { 2834 if (1 & bitset) 2835 regs[count++] = reg; 2836 bitset >>= 1; 2837 } 2838 2839 if (count == 0) { 2840 return 0; 2841 } 2842 2843 if (mode == PushPopFull) { 2844 if (use_sve && sve_vector_size_in_bytes > 16) { 2845 mode = PushPopSVE; 2846 } else { 2847 mode = PushPopNeon; 2848 } 2849 } 2850 2851 #ifndef PRODUCT 2852 { 2853 char buffer[48]; 2854 if (mode == PushPopSVE) { 2855 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2856 } else if (mode == PushPopNeon) { 2857 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2858 } else { 2859 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2860 } 2861 block_comment(buffer); 2862 } 2863 #endif 2864 2865 if (mode == PushPopSVE) { 2866 sub(stack, stack, sve_vector_size_in_bytes * count); 2867 for (int i = 0; i < count; i++) { 2868 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2869 } 2870 return count * sve_vector_size_in_bytes / 8; 2871 } 2872 2873 if (mode == PushPopNeon) { 2874 if (count == 1) { 2875 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2876 return 2; 2877 } 2878 2879 bool odd = (count & 1) == 1; 2880 int push_slots = count + (odd ? 1 : 0); 2881 2882 // Always pushing full 128 bit registers. 2883 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2884 words_pushed += 2; 2885 2886 for (int i = 2; i + 1 < count; i += 2) { 2887 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2888 words_pushed += 2; 2889 } 2890 2891 if (odd) { 2892 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2893 words_pushed++; 2894 } 2895 2896 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2897 return count * 2; 2898 } 2899 2900 if (mode == PushPopFp) { 2901 bool odd = (count & 1) == 1; 2902 int push_slots = count + (odd ? 
1 : 0); 2903 2904 if (count == 1) { 2905 // Stack pointer must be 16 bytes aligned 2906 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2907 return 1; 2908 } 2909 2910 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2911 words_pushed += 2; 2912 2913 for (int i = 2; i + 1 < count; i += 2) { 2914 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2915 words_pushed += 2; 2916 } 2917 2918 if (odd) { 2919 // Stack pointer must be 16 bytes aligned 2920 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2921 words_pushed++; 2922 } 2923 2924 assert(words_pushed == count, "oops, pushed != count"); 2925 2926 return count; 2927 } 2928 2929 return 0; 2930 } 2931 2932 // Return the number of dwords popped 2933 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2934 int words_pushed = 0; 2935 bool use_sve = false; 2936 int sve_vector_size_in_bytes = 0; 2937 2938 #ifdef COMPILER2 2939 use_sve = Matcher::supports_scalable_vector(); 2940 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2941 #endif 2942 // Scan bitset to accumulate register pairs 2943 unsigned char regs[32]; 2944 int count = 0; 2945 for (int reg = 0; reg <= 31; reg++) { 2946 if (1 & bitset) 2947 regs[count++] = reg; 2948 bitset >>= 1; 2949 } 2950 2951 if (count == 0) { 2952 return 0; 2953 } 2954 2955 if (mode == PushPopFull) { 2956 if (use_sve && sve_vector_size_in_bytes > 16) { 2957 mode = PushPopSVE; 2958 } else { 2959 mode = PushPopNeon; 2960 } 2961 } 2962 2963 #ifndef PRODUCT 2964 { 2965 char buffer[48]; 2966 if (mode == PushPopSVE) { 2967 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 2968 } else if (mode == PushPopNeon) { 2969 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 2970 } else { 2971 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 2972 } 2973 block_comment(buffer); 2974 } 2975 #endif 2976 2977 if (mode == PushPopSVE) { 2978 for (int i = count - 1; i >= 0; i--) { 2979 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 2980 } 2981 add(stack, stack, sve_vector_size_in_bytes * count); 2982 return count * sve_vector_size_in_bytes / 8; 2983 } 2984 2985 if (mode == PushPopNeon) { 2986 if (count == 1) { 2987 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 2988 return 2; 2989 } 2990 2991 bool odd = (count & 1) == 1; 2992 int push_slots = count + (odd ? 1 : 0); 2993 2994 if (odd) { 2995 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2996 words_pushed++; 2997 } 2998 2999 for (int i = 2; i + 1 < count; i += 2) { 3000 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 3001 words_pushed += 2; 3002 } 3003 3004 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 3005 words_pushed += 2; 3006 3007 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 3008 3009 return count * 2; 3010 } 3011 3012 if (mode == PushPopFp) { 3013 bool odd = (count & 1) == 1; 3014 int push_slots = count + (odd ? 
1 : 0); 3015 3016 if (count == 1) { 3017 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 3018 return 1; 3019 } 3020 3021 if (odd) { 3022 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 3023 words_pushed++; 3024 } 3025 3026 for (int i = 2; i + 1 < count; i += 2) { 3027 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 3028 words_pushed += 2; 3029 } 3030 3031 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 3032 words_pushed += 2; 3033 3034 assert(words_pushed == count, "oops, pushed != count"); 3035 3036 return count; 3037 } 3038 3039 return 0; 3040 } 3041 3042 // Return the number of dwords pushed 3043 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 3044 bool use_sve = false; 3045 int sve_predicate_size_in_slots = 0; 3046 3047 #ifdef COMPILER2 3048 use_sve = Matcher::supports_scalable_vector(); 3049 if (use_sve) { 3050 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3051 } 3052 #endif 3053 3054 if (!use_sve) { 3055 return 0; 3056 } 3057 3058 unsigned char regs[PRegister::number_of_registers]; 3059 int count = 0; 3060 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3061 if (1 & bitset) 3062 regs[count++] = reg; 3063 bitset >>= 1; 3064 } 3065 3066 if (count == 0) { 3067 return 0; 3068 } 3069 3070 int total_push_bytes = align_up(sve_predicate_size_in_slots * 3071 VMRegImpl::stack_slot_size * count, 16); 3072 sub(stack, stack, total_push_bytes); 3073 for (int i = 0; i < count; i++) { 3074 sve_str(as_PRegister(regs[i]), Address(stack, i)); 3075 } 3076 return total_push_bytes / 8; 3077 } 3078 3079 // Return the number of dwords popped 3080 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 3081 bool use_sve = false; 3082 int sve_predicate_size_in_slots = 0; 3083 3084 #ifdef COMPILER2 3085 use_sve = Matcher::supports_scalable_vector(); 3086 if (use_sve) { 3087 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3088 } 3089 #endif 3090 3091 if (!use_sve) { 3092 return 0; 3093 } 3094 3095 unsigned char regs[PRegister::number_of_registers]; 3096 int count = 0; 3097 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3098 if (1 & bitset) 3099 regs[count++] = reg; 3100 bitset >>= 1; 3101 } 3102 3103 if (count == 0) { 3104 return 0; 3105 } 3106 3107 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 3108 VMRegImpl::stack_slot_size * count, 16); 3109 for (int i = count - 1; i >= 0; i--) { 3110 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 3111 } 3112 add(stack, stack, total_pop_bytes); 3113 return total_pop_bytes / 8; 3114 } 3115 3116 #ifdef ASSERT 3117 void MacroAssembler::verify_heapbase(const char* msg) { 3118 #if 0 3119 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 3120 assert (Universe::heap() != nullptr, "java heap should be initialized"); 3121 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 3122 // rheapbase is allocated as general register 3123 return; 3124 } 3125 if (CheckCompressedOops) { 3126 Label ok; 3127 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 3128 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3129 br(Assembler::EQ, ok); 3130 stop(msg); 3131 bind(ok); 3132 pop(1 << rscratch1->encoding(), sp); 3133 } 3134 #endif 3135 } 3136 #endif 3137 3138 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 
3139 assert_different_registers(value, tmp1, tmp2); 3140 Label done, tagged, weak_tagged; 3141 3142 cbz(value, done); // Use null as-is. 3143 tst(value, JNIHandles::tag_mask); // Test for tag. 3144 br(Assembler::NE, tagged); 3145 3146 // Resolve local handle 3147 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3148 verify_oop(value); 3149 b(done); 3150 3151 bind(tagged); 3152 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3153 tbnz(value, 0, weak_tagged); // Test for weak tag. 3154 3155 // Resolve global handle 3156 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3157 verify_oop(value); 3158 b(done); 3159 3160 bind(weak_tagged); 3161 // Resolve jweak. 3162 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3163 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3164 verify_oop(value); 3165 3166 bind(done); 3167 } 3168 3169 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3170 assert_different_registers(value, tmp1, tmp2); 3171 Label done; 3172 3173 cbz(value, done); // Use null as-is. 3174 3175 #ifdef ASSERT 3176 { 3177 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3178 Label valid_global_tag; 3179 tbnz(value, 1, valid_global_tag); // Test for global tag 3180 stop("non global jobject using resolve_global_jobject"); 3181 bind(valid_global_tag); 3182 } 3183 #endif 3184 3185 // Resolve global handle 3186 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3187 verify_oop(value); 3188 3189 bind(done); 3190 } 3191 3192 void MacroAssembler::stop(const char* msg) { 3193 BLOCK_COMMENT(msg); 3194 dcps1(0xdeae); 3195 emit_int64((uintptr_t)msg); 3196 } 3197 3198 void MacroAssembler::unimplemented(const char* what) { 3199 const char* buf = nullptr; 3200 { 3201 ResourceMark rm; 3202 stringStream ss; 3203 ss.print("unimplemented: %s", what); 3204 buf = code_string(ss.as_string()); 3205 } 3206 stop(buf); 3207 } 3208 3209 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3210 #ifdef ASSERT 3211 Label OK; 3212 br(cc, OK); 3213 stop(msg); 3214 bind(OK); 3215 #endif 3216 } 3217 3218 // If a constant does not fit in an immediate field, generate some 3219 // number of MOV instructions and then perform the operation. 3220 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3221 add_sub_imm_insn insn1, 3222 add_sub_reg_insn insn2, 3223 bool is32) { 3224 assert(Rd != zr, "Rd = zr and not setting flags?"); 3225 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3226 if (fits) { 3227 (this->*insn1)(Rd, Rn, imm); 3228 } else { 3229 if (uabs(imm) < (1 << 24)) { 3230 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3231 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3232 } else { 3233 assert_different_registers(Rd, Rn); 3234 mov(Rd, imm); 3235 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3236 } 3237 } 3238 } 3239 3240 // Separate vsn which sets the flags. Optimisations are more restricted 3241 // because we must set the flags correctly. 3242 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3243 add_sub_imm_insn insn1, 3244 add_sub_reg_insn insn2, 3245 bool is32) { 3246 bool fits = operand_valid_for_add_sub_immediate(is32 ? 
(int32_t)imm : imm); 3247 if (fits) { 3248 (this->*insn1)(Rd, Rn, imm); 3249 } else { 3250 assert_different_registers(Rd, Rn); 3251 assert(Rd != zr, "overflow in immediate operand"); 3252 mov(Rd, imm); 3253 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3254 } 3255 } 3256 3257 3258 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3259 if (increment.is_register()) { 3260 add(Rd, Rn, increment.as_register()); 3261 } else { 3262 add(Rd, Rn, increment.as_constant()); 3263 } 3264 } 3265 3266 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3267 if (increment.is_register()) { 3268 addw(Rd, Rn, increment.as_register()); 3269 } else { 3270 addw(Rd, Rn, increment.as_constant()); 3271 } 3272 } 3273 3274 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3275 if (decrement.is_register()) { 3276 sub(Rd, Rn, decrement.as_register()); 3277 } else { 3278 sub(Rd, Rn, decrement.as_constant()); 3279 } 3280 } 3281 3282 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3283 if (decrement.is_register()) { 3284 subw(Rd, Rn, decrement.as_register()); 3285 } else { 3286 subw(Rd, Rn, decrement.as_constant()); 3287 } 3288 } 3289 3290 void MacroAssembler::reinit_heapbase() 3291 { 3292 if (UseCompressedOops) { 3293 if (Universe::is_fully_initialized()) { 3294 mov(rheapbase, CompressedOops::base()); 3295 } else { 3296 lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3297 ldr(rheapbase, Address(rheapbase)); 3298 } 3299 } 3300 } 3301 3302 // this simulates the behaviour of the x86 cmpxchg instruction using a 3303 // load linked/store conditional pair. we use the acquire/release 3304 // versions of these instructions so that we flush pending writes as 3305 // per Java semantics. 3306 3307 // n.b the x86 version assumes the old value to be compared against is 3308 // in rax and updates rax with the value located in memory if the 3309 // cmpxchg fails. we supply a register for the old value explicitly 3310 3311 // the aarch64 load linked/store conditional instructions do not 3312 // accept an offset. so, unlike x86, we must provide a plain register 3313 // to identify the memory word to be compared/exchanged rather than a 3314 // register+offset Address. 3315 3316 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3317 Label &succeed, Label *fail) { 3318 // oldv holds comparison value 3319 // newv holds value to write in exchange 3320 // addr identifies memory word to compare against/update 3321 if (UseLSE) { 3322 mov(tmp, oldv); 3323 casal(Assembler::xword, oldv, newv, addr); 3324 cmp(tmp, oldv); 3325 br(Assembler::EQ, succeed); 3326 membar(AnyAny); 3327 } else { 3328 Label retry_load, nope; 3329 prfm(Address(addr), PSTL1STRM); 3330 bind(retry_load); 3331 // flush and load exclusive from the memory location 3332 // and fail if it is not what we expect 3333 ldaxr(tmp, addr); 3334 cmp(tmp, oldv); 3335 br(Assembler::NE, nope); 3336 // if we store+flush with no intervening write tmp will be zero 3337 stlxr(tmp, newv, addr); 3338 cbzw(tmp, succeed); 3339 // retry so we only ever return after a load fails to compare 3340 // ensures we don't return a stale value after a failed write. 
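    // (A failed stlxr only means the exclusive monitor was lost, not that
    // the memory word changed, so we loop rather than report failure here.)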
3341 b(retry_load); 3342 // if the memory word differs we return it in oldv and signal a fail 3343 bind(nope); 3344 membar(AnyAny); 3345 mov(oldv, tmp); 3346 } 3347 if (fail) 3348 b(*fail); 3349 } 3350 3351 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 3352 Label &succeed, Label *fail) { 3353 assert(oopDesc::mark_offset_in_bytes() == 0, "assumption"); 3354 cmpxchgptr(oldv, newv, obj, tmp, succeed, fail); 3355 } 3356 3357 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 3358 Label &succeed, Label *fail) { 3359 // oldv holds comparison value 3360 // newv holds value to write in exchange 3361 // addr identifies memory word to compare against/update 3362 // tmp returns 0/1 for success/failure 3363 if (UseLSE) { 3364 mov(tmp, oldv); 3365 casal(Assembler::word, oldv, newv, addr); 3366 cmp(tmp, oldv); 3367 br(Assembler::EQ, succeed); 3368 membar(AnyAny); 3369 } else { 3370 Label retry_load, nope; 3371 prfm(Address(addr), PSTL1STRM); 3372 bind(retry_load); 3373 // flush and load exclusive from the memory location 3374 // and fail if it is not what we expect 3375 ldaxrw(tmp, addr); 3376 cmp(tmp, oldv); 3377 br(Assembler::NE, nope); 3378 // if we store+flush with no intervening write tmp will be zero 3379 stlxrw(tmp, newv, addr); 3380 cbzw(tmp, succeed); 3381 // retry so we only ever return after a load fails to compare 3382 // ensures we don't return a stale value after a failed write. 3383 b(retry_load); 3384 // if the memory word differs we return it in oldv and signal a fail 3385 bind(nope); 3386 membar(AnyAny); 3387 mov(oldv, tmp); 3388 } 3389 if (fail) 3390 b(*fail); 3391 } 3392 3393 // A generic CAS; success or failure is in the EQ flag. A weak CAS 3394 // doesn't retry and may fail spuriously. If the oldval is wanted, 3395 // Pass a register for the result, otherwise pass noreg. 3396 3397 // Clobbers rscratch1 3398 void MacroAssembler::cmpxchg(Register addr, Register expected, 3399 Register new_val, 3400 enum operand_size size, 3401 bool acquire, bool release, 3402 bool weak, 3403 Register result) { 3404 if (result == noreg) result = rscratch1; 3405 BLOCK_COMMENT("cmpxchg {"); 3406 if (UseLSE) { 3407 mov(result, expected); 3408 lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true); 3409 compare_eq(result, expected, size); 3410 #ifdef ASSERT 3411 // Poison rscratch1 which is written on !UseLSE branch 3412 mov(rscratch1, 0x1f1f1f1f1f1f1f1f); 3413 #endif 3414 } else { 3415 Label retry_load, done; 3416 prfm(Address(addr), PSTL1STRM); 3417 bind(retry_load); 3418 load_exclusive(result, addr, size, acquire); 3419 compare_eq(result, expected, size); 3420 br(Assembler::NE, done); 3421 store_exclusive(rscratch1, new_val, addr, size, release); 3422 if (weak) { 3423 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller. 3424 } else { 3425 cbnzw(rscratch1, retry_load); 3426 } 3427 bind(done); 3428 } 3429 BLOCK_COMMENT("} cmpxchg"); 3430 } 3431 3432 // A generic comparison. Only compares for equality, clobbers rscratch1. 
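// For halfword and byte sizes there is no cmp variant, so equality is
// tested by XOR-ing the operands into rscratch1 and AND-ing the result
// with 0xffff or 0xff to set the flags.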
3433 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3434 if (size == xword) { 3435 cmp(rm, rn); 3436 } else if (size == word) { 3437 cmpw(rm, rn); 3438 } else if (size == halfword) { 3439 eorw(rscratch1, rm, rn); 3440 ands(zr, rscratch1, 0xffff); 3441 } else if (size == byte) { 3442 eorw(rscratch1, rm, rn); 3443 ands(zr, rscratch1, 0xff); 3444 } else { 3445 ShouldNotReachHere(); 3446 } 3447 } 3448 3449 3450 static bool different(Register a, RegisterOrConstant b, Register c) { 3451 if (b.is_constant()) 3452 return a != c; 3453 else 3454 return a != b.as_register() && a != c && b.as_register() != c; 3455 } 3456 3457 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3458 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3459 if (UseLSE) { \ 3460 prev = prev->is_valid() ? prev : zr; \ 3461 if (incr.is_register()) { \ 3462 AOP(sz, incr.as_register(), prev, addr); \ 3463 } else { \ 3464 mov(rscratch2, incr.as_constant()); \ 3465 AOP(sz, rscratch2, prev, addr); \ 3466 } \ 3467 return; \ 3468 } \ 3469 Register result = rscratch2; \ 3470 if (prev->is_valid()) \ 3471 result = different(prev, incr, addr) ? prev : rscratch2; \ 3472 \ 3473 Label retry_load; \ 3474 prfm(Address(addr), PSTL1STRM); \ 3475 bind(retry_load); \ 3476 LDXR(result, addr); \ 3477 OP(rscratch1, result, incr); \ 3478 STXR(rscratch2, rscratch1, addr); \ 3479 cbnzw(rscratch2, retry_load); \ 3480 if (prev->is_valid() && prev != result) { \ 3481 IOP(prev, rscratch1, incr); \ 3482 } \ 3483 } 3484 3485 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3486 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3487 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3488 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3489 3490 #undef ATOMIC_OP 3491 3492 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3493 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3494 if (UseLSE) { \ 3495 prev = prev->is_valid() ? prev : zr; \ 3496 AOP(sz, newv, prev, addr); \ 3497 return; \ 3498 } \ 3499 Register result = rscratch2; \ 3500 if (prev->is_valid()) \ 3501 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 3502 \ 3503 Label retry_load; \ 3504 prfm(Address(addr), PSTL1STRM); \ 3505 bind(retry_load); \ 3506 LDXR(result, addr); \ 3507 STXR(rscratch1, newv, addr); \ 3508 cbnzw(rscratch1, retry_load); \ 3509 if (prev->is_valid() && prev != result) \ 3510 mov(prev, result); \ 3511 } 3512 3513 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) 3514 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) 3515 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) 3516 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) 3517 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) 3518 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) 3519 3520 #undef ATOMIC_XCHG 3521 3522 #ifndef PRODUCT 3523 extern "C" void findpc(intptr_t x); 3524 #endif 3525 3526 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 3527 { 3528 // In order to get locks to work, we need to fake a in_VM state 3529 if (ShowMessageBoxOnError ) { 3530 JavaThread* thread = JavaThread::current(); 3531 JavaThreadState saved_state = thread->thread_state(); 3532 thread->set_thread_state(_thread_in_vm); 3533 #ifndef PRODUCT 3534 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 3535 ttyLocker ttyl; 3536 BytecodeCounter::print(); 3537 } 3538 #endif 3539 if (os::message_box(msg, "Execution stopped, print registers?")) { 3540 ttyLocker ttyl; 3541 tty->print_cr(" pc = 0x%016" PRIx64, pc); 3542 #ifndef PRODUCT 3543 tty->cr(); 3544 findpc(pc); 3545 tty->cr(); 3546 #endif 3547 tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]); 3548 tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]); 3549 tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]); 3550 tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]); 3551 tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]); 3552 tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]); 3553 tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]); 3554 tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]); 3555 tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]); 3556 tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]); 3557 tty->print_cr("r10 = 0x%016" PRIx64, regs[10]); 3558 tty->print_cr("r11 = 0x%016" PRIx64, regs[11]); 3559 tty->print_cr("r12 = 0x%016" PRIx64, regs[12]); 3560 tty->print_cr("r13 = 0x%016" PRIx64, regs[13]); 3561 tty->print_cr("r14 = 0x%016" PRIx64, regs[14]); 3562 tty->print_cr("r15 = 0x%016" PRIx64, regs[15]); 3563 tty->print_cr("r16 = 0x%016" PRIx64, regs[16]); 3564 tty->print_cr("r17 = 0x%016" PRIx64, regs[17]); 3565 tty->print_cr("r18 = 0x%016" PRIx64, regs[18]); 3566 tty->print_cr("r19 = 0x%016" PRIx64, regs[19]); 3567 tty->print_cr("r20 = 0x%016" PRIx64, regs[20]); 3568 tty->print_cr("r21 = 0x%016" PRIx64, regs[21]); 3569 tty->print_cr("r22 = 0x%016" PRIx64, regs[22]); 3570 tty->print_cr("r23 = 0x%016" PRIx64, regs[23]); 3571 tty->print_cr("r24 = 0x%016" PRIx64, regs[24]); 3572 tty->print_cr("r25 = 0x%016" PRIx64, regs[25]); 3573 tty->print_cr("r26 = 0x%016" PRIx64, regs[26]); 3574 tty->print_cr("r27 = 0x%016" PRIx64, regs[27]); 3575 tty->print_cr("r28 = 0x%016" PRIx64, regs[28]); 3576 tty->print_cr("r30 = 0x%016" PRIx64, regs[30]); 3577 tty->print_cr("r31 = 0x%016" PRIx64, regs[31]); 3578 BREAKPOINT; 3579 } 3580 } 3581 fatal("DEBUG MESSAGE: %s", msg); 3582 } 3583 3584 RegSet MacroAssembler::call_clobbered_gp_registers() { 3585 RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2); 3586 #ifndef R18_RESERVED 3587 regs += r18_tls; 3588 #endif 3589 return regs; 3590 } 3591 3592 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { 3593 int step = 4 * 
wordSize; 3594 push(call_clobbered_gp_registers() - exclude, sp); 3595 sub(sp, sp, step); 3596 mov(rscratch1, -step); 3597 // Push v0-v7, v16-v31. 3598 for (int i = 31; i>= 4; i -= 4) { 3599 if (i <= v7->encoding() || i >= v16->encoding()) 3600 st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1), 3601 as_FloatRegister(i), T1D, Address(post(sp, rscratch1))); 3602 } 3603 st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2), 3604 as_FloatRegister(3), T1D, Address(sp)); 3605 } 3606 3607 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { 3608 for (int i = 0; i < 32; i += 4) { 3609 if (i <= v7->encoding() || i >= v16->encoding()) 3610 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3611 as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize))); 3612 } 3613 3614 reinitialize_ptrue(); 3615 3616 pop(call_clobbered_gp_registers() - exclude, sp); 3617 } 3618 3619 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, 3620 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3621 push(RegSet::range(r0, r29), sp); // integer registers except lr & sp 3622 if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3623 sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3624 for (int i = 0; i < FloatRegister::number_of_registers; i++) { 3625 sve_str(as_FloatRegister(i), Address(sp, i)); 3626 } 3627 } else { 3628 int step = (save_vectors ? 8 : 4) * wordSize; 3629 mov(rscratch1, -step); 3630 sub(sp, sp, step); 3631 for (int i = 28; i >= 4; i -= 4) { 3632 st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3633 as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1))); 3634 } 3635 st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp); 3636 } 3637 if (save_vectors && use_sve && total_predicate_in_bytes > 0) { 3638 sub(sp, sp, total_predicate_in_bytes); 3639 for (int i = 0; i < PRegister::number_of_registers; i++) { 3640 sve_str(as_PRegister(i), Address(sp, i)); 3641 } 3642 } 3643 } 3644 3645 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, 3646 int sve_vector_size_in_bytes, int total_predicate_in_bytes) { 3647 if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { 3648 for (int i = PRegister::number_of_registers - 1; i >= 0; i--) { 3649 sve_ldr(as_PRegister(i), Address(sp, i)); 3650 } 3651 add(sp, sp, total_predicate_in_bytes); 3652 } 3653 if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { 3654 for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { 3655 sve_ldr(as_FloatRegister(i), Address(sp, i)); 3656 } 3657 add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); 3658 } else { 3659 int step = (restore_vectors ? 8 : 4) * wordSize; 3660 for (int i = 0; i <= 28; i += 4) 3661 ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2), 3662 as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step))); 3663 } 3664 3665 // We may use predicate registers and rely on ptrue with SVE, 3666 // regardless of wide vector (> 8 bytes) used or not. 3667 if (use_sve) { 3668 reinitialize_ptrue(); 3669 } 3670 3671 // integer registers except lr & sp 3672 pop(RegSet::range(r0, r17), sp); 3673 #ifdef R18_RESERVED 3674 ldp(zr, r19, Address(post(sp, 2 * wordSize))); 3675 pop(RegSet::range(r20, r29), sp); 3676 #else 3677 pop(RegSet::range(r18_tls, r29), sp); 3678 #endif 3679 } 3680 3681 /** 3682 * Helpers for multiply_to_len(). 
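 *
 * add2_with_carry computes, in effect,
 *   final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2
 * using adds/adc so the carries propagate into the high word.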
3683  */
3684 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3685                                      Register src1, Register src2) {
3686   adds(dest_lo, dest_lo, src1);
3687   adc(dest_hi, dest_hi, zr);
3688   adds(dest_lo, dest_lo, src2);
3689   adc(final_dest_hi, dest_hi, zr);
3690 }
3691
3692 // Generate an address from (r + r1 extend offset).  "size" is the
3693 // size of the operand.  The result may be in rscratch2.
3694 Address MacroAssembler::offsetted_address(Register r, Register r1,
3695                                           Address::extend ext, int offset, int size) {
3696   if (offset || (ext.shift() % size != 0)) {
3697     lea(rscratch2, Address(r, r1, ext));
3698     return Address(rscratch2, offset);
3699   } else {
3700     return Address(r, r1, ext);
3701   }
3702 }
3703
3704 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3705 {
3706   assert(offset >= 0, "spill to negative address?");
3707   // Is the offset reachable?
3708   //   Not aligned - 9 bits signed offset
3709   //   Aligned - 12 bits unsigned offset shifted
3710   Register base = sp;
3711   if ((offset & (size-1)) && offset >= (1<<8)) {
3712     add(tmp, base, offset & ((1<<12)-1));
3713     base = tmp;
3714     offset &= -1u<<12;
3715   }
3716
3717   if (offset >= (1<<12) * size) {
3718     add(tmp, base, offset & (((1<<12)-1)<<12));
3719     base = tmp;
3720     offset &= ~(((1<<12)-1)<<12);
3721   }
3722
3723   return Address(base, offset);
3724 }
3725
3726 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3727   assert(offset >= 0, "spill to negative address?");
3728
3729   Register base = sp;
3730
3731   // An immediate offset in the range 0 to 255 which is multiplied
3732   // by the current vector or predicate register size in bytes.
3733   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3734     return Address(base, offset / sve_reg_size_in_bytes);
3735   }
3736
3737   add(tmp, base, offset);
3738   return Address(tmp);
3739 }
3740
3741 // Checks whether the offset is aligned.
3742 // Returns true if it is, else false.
3743 bool MacroAssembler::merge_alignment_check(Register base,
3744                                            size_t size,
3745                                            int64_t cur_offset,
3746                                            int64_t prev_offset) const {
3747   if (AvoidUnalignedAccesses) {
3748     if (base == sp) {
3749       // Checks whether the low offset is aligned to a pair of registers.
3750       int64_t pair_mask = size * 2 - 1;
3751       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3752       return (offset & pair_mask) == 0;
3753     } else { // If base is not sp, we can't guarantee the access is aligned.
3754       return false;
3755     }
3756   } else {
3757     int64_t mask = size - 1;
3758     // Load/store pair instructions only support element-size-aligned offsets.
3759     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3760   }
3761 }
3762
3763 // Checks whether current and previous loads/stores can be merged.
3764 // Returns true if they can be merged, else false.
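// E.g. two adjacent same-size accesses off the same base, such as
//   ldr x2, [sp, #16]
//   ldr x3, [sp, #24]
// may be rewritten by merge_ldst below as the single
//   ldp x2, x3, [sp, #16]
// when the size, offset, alignment and register constraints checked here hold.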
3765 bool MacroAssembler::ldst_can_merge(Register rt,
3766                                     const Address &adr,
3767                                     size_t cur_size_in_bytes,
3768                                     bool is_store) const {
3769   address prev = pc() - NativeInstruction::instruction_size;
3770   address last = code()->last_insn();
3771
3772   if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3773     return false;
3774   }
3775
3776   if (adr.getMode() != Address::base_plus_offset || prev != last) {
3777     return false;
3778   }
3779
3780   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3781   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3782
3783   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3784   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3785
3786   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3787     return false;
3788   }
3789
3790   int64_t max_offset = 63 * prev_size_in_bytes;
3791   int64_t min_offset = -64 * prev_size_in_bytes;
3792
3793   assert(prev_ldst->is_not_pre_post_index(), "pre-indexed and post-indexed accesses cannot be merged.");
3794
3795   // Only accesses with the same base register can be merged.
3796   if (adr.base() != prev_ldst->base()) {
3797     return false;
3798   }
3799
3800   int64_t cur_offset = adr.offset();
3801   int64_t prev_offset = prev_ldst->offset();
3802   size_t diff = abs(cur_offset - prev_offset);
3803   if (diff != prev_size_in_bytes) {
3804     return false;
3805   }
3806
3807   // The following cases cannot be merged:
3808   //   ldr x2, [x2, #8]
3809   //   ldr x3, [x2, #16]
3810   // or:
3811   //   ldr x2, [x3, #8]
3812   //   ldr x2, [x3, #16]
3813   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3814   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3815     return false;
3816   }
3817
3818   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3819   // The low offset must be within the ldp/stp instruction's offset range.
3820   if (low_offset > max_offset || low_offset < min_offset) {
3821     return false;
3822   }
3823
3824   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3825     return true;
3826   }
3827
3828   return false;
3829 }
3830
3831 // Merge current load/store with previous load/store into ldp/stp.
3832 void MacroAssembler::merge_ldst(Register rt,
3833                                 const Address &adr,
3834                                 size_t cur_size_in_bytes,
3835                                 bool is_store) {
3836
3837   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3838
3839   Register rt_low, rt_high;
3840   address prev = pc() - NativeInstruction::instruction_size;
3841   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3842
3843   int64_t offset;
3844
3845   if (adr.offset() < prev_ldst->offset()) {
3846     offset = adr.offset();
3847     rt_low = rt;
3848     rt_high = prev_ldst->target();
3849   } else {
3850     offset = prev_ldst->offset();
3851     rt_low = prev_ldst->target();
3852     rt_high = rt;
3853   }
3854
3855   Address adr_p = Address(prev_ldst->base(), offset);
3856   // Overwrite previous generated binary.
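  // Rewind the code buffer by one instruction so that the ldp/stp emitted
  // below lands on top of the previous ldr/str.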
3857   code_section()->set_end(prev);
3858
3859   const size_t sz = prev_ldst->size_in_bytes();
3860   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3861   if (!is_store) {
3862     BLOCK_COMMENT("merged ldr pair");
3863     if (sz == 8) {
3864       ldp(rt_low, rt_high, adr_p);
3865     } else {
3866       ldpw(rt_low, rt_high, adr_p);
3867     }
3868   } else {
3869     BLOCK_COMMENT("merged str pair");
3870     if (sz == 8) {
3871       stp(rt_low, rt_high, adr_p);
3872     } else {
3873       stpw(rt_low, rt_high, adr_p);
3874     }
3875   }
3876 }
3877
3878 /**
3879  * Multiply 64 bit by 64 bit first loop.
3880  */
3881 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3882                                            Register y, Register y_idx, Register z,
3883                                            Register carry, Register product,
3884                                            Register idx, Register kdx) {
3885   //
3886   //  jlong carry, x[], y[], z[];
3887   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3888   //    huge_128 product = y[idx] * x[xstart] + carry;
3889   //    z[kdx] = (jlong)product;
3890   //    carry  = (jlong)(product >>> 64);
3891   //  }
3892   //  z[xstart] = carry;
3893   //
3894
3895   Label L_first_loop, L_first_loop_exit;
3896   Label L_one_x, L_one_y, L_multiply;
3897
3898   subsw(xstart, xstart, 1);
3899   br(Assembler::MI, L_one_x);
3900
3901   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3902   ldr(x_xstart, Address(rscratch1));
3903   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3904
3905   bind(L_first_loop);
3906   subsw(idx, idx, 1);
3907   br(Assembler::MI, L_first_loop_exit);
3908   subsw(idx, idx, 1);
3909   br(Assembler::MI, L_one_y);
3910   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3911   ldr(y_idx, Address(rscratch1));
3912   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3913   bind(L_multiply);
3914
3915   // AArch64 has a multiply-accumulate instruction that we can't use
3916   // here because it has no way to process carries, so we have to use
3917   // separate add and adc instructions.  Bah.
3918   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3919   mul(product, x_xstart, y_idx);
3920   adds(product, product, carry);
3921   adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product
3922
3923   subw(kdx, kdx, 2);
3924   ror(product, product, 32); // back to big-endian
3925   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3926
3927   b(L_first_loop);
3928
3929   bind(L_one_y);
3930   ldrw(y_idx, Address(y, 0));
3931   b(L_multiply);
3932
3933   bind(L_one_x);
3934   ldrw(x_xstart, Address(x, 0));
3935   b(L_first_loop);
3936
3937   bind(L_first_loop_exit);
3938 }
3939
3940 /**
3941  * Multiply 128 bit by 128 bit. Unrolled inner loop.
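 *
 * Each iteration consumes four 32-bit digits of y (loaded as two 64-bit
 * words) and folds them into z under a single carry chain; a tail of one
 * or two remaining digits is handled after the loop exits.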
3942 * 3943 */ 3944 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 3945 Register carry, Register carry2, 3946 Register idx, Register jdx, 3947 Register yz_idx1, Register yz_idx2, 3948 Register tmp, Register tmp3, Register tmp4, 3949 Register tmp6, Register product_hi) { 3950 3951 // jlong carry, x[], y[], z[]; 3952 // int kdx = ystart+1; 3953 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 3954 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 3955 // jlong carry2 = (jlong)(tmp3 >>> 64); 3956 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 3957 // carry = (jlong)(tmp4 >>> 64); 3958 // z[kdx+idx+1] = (jlong)tmp3; 3959 // z[kdx+idx] = (jlong)tmp4; 3960 // } 3961 // idx += 2; 3962 // if (idx > 0) { 3963 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 3964 // z[kdx+idx] = (jlong)yz_idx1; 3965 // carry = (jlong)(yz_idx1 >>> 64); 3966 // } 3967 // 3968 3969 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 3970 3971 lsrw(jdx, idx, 2); 3972 3973 bind(L_third_loop); 3974 3975 subsw(jdx, jdx, 1); 3976 br(Assembler::MI, L_third_loop_exit); 3977 subw(idx, idx, 4); 3978 3979 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 3980 3981 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 3982 3983 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 3984 3985 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 3986 ror(yz_idx2, yz_idx2, 32); 3987 3988 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 3989 3990 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 3991 umulh(tmp4, product_hi, yz_idx1); 3992 3993 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 3994 ror(rscratch2, rscratch2, 32); 3995 3996 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 3997 umulh(carry2, product_hi, yz_idx2); 3998 3999 // propagate sum of both multiplications into carry:tmp4:tmp3 4000 adds(tmp3, tmp3, carry); 4001 adc(tmp4, tmp4, zr); 4002 adds(tmp3, tmp3, rscratch1); 4003 adcs(tmp4, tmp4, tmp); 4004 adc(carry, carry2, zr); 4005 adds(tmp4, tmp4, rscratch2); 4006 adc(carry, carry, zr); 4007 4008 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 4009 ror(tmp4, tmp4, 32); 4010 stp(tmp4, tmp3, Address(tmp6, 0)); 4011 4012 b(L_third_loop); 4013 bind (L_third_loop_exit); 4014 4015 andw (idx, idx, 0x3); 4016 cbz(idx, L_post_third_loop_done); 4017 4018 Label L_check_1; 4019 subsw(idx, idx, 2); 4020 br(Assembler::MI, L_check_1); 4021 4022 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4023 ldr(yz_idx1, Address(rscratch1, 0)); 4024 ror(yz_idx1, yz_idx1, 32); 4025 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4026 umulh(tmp4, product_hi, yz_idx1); 4027 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4028 ldr(yz_idx2, Address(rscratch1, 0)); 4029 ror(yz_idx2, yz_idx2, 32); 4030 4031 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 4032 4033 ror(tmp3, tmp3, 32); 4034 str(tmp3, Address(rscratch1, 0)); 4035 4036 bind (L_check_1); 4037 4038 andw (idx, idx, 0x1); 4039 subsw(idx, idx, 1); 4040 br(Assembler::MI, L_post_third_loop_done); 4041 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4042 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 4043 umulh(carry2, tmp4, product_hi); 4044 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4045 4046 add2_with_carry(carry2, tmp3, tmp4, carry); 4047 4048 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4049 
extr(carry, carry2, tmp3, 32);
4050
4051   bind(L_post_third_loop_done);
4052 }
4053
4054 /**
4055  * Code for BigInteger::multiplyToLen() intrinsic.
4056  *
4057  * r0: x
4058  * r1: xlen
4059  * r2: y
4060  * r3: ylen
4061  * r4: z
4062  * r5: tmp0
4063  * r10: tmp1
4064  * r11: tmp2
4065  * r12: tmp3
4066  * r13: tmp4
4067  * r14: tmp5
4068  * r15: tmp6
4069  * r16: tmp7
4070  *
4071  */
4072 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
4073                                      Register z, Register tmp0,
4074                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
4075                                      Register tmp5, Register tmp6, Register product_hi) {
4076
4077   assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);
4078
4079   const Register idx = tmp1;
4080   const Register kdx = tmp2;
4081   const Register xstart = tmp3;
4082
4083   const Register y_idx = tmp4;
4084   const Register carry = tmp5;
4085   const Register product = xlen;
4086   const Register x_xstart = tmp0;
4087
4088   // First Loop.
4089   //
4090   //  final static long LONG_MASK = 0xffffffffL;
4091   //  int xstart = xlen - 1;
4092   //  int ystart = ylen - 1;
4093   //  long carry = 0;
4094   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4095   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
4096   //    z[kdx] = (int)product;
4097   //    carry = product >>> 32;
4098   //  }
4099   //  z[xstart] = (int)carry;
4100   //
4101
4102   movw(idx, ylen);       // idx = ylen;
4103   addw(kdx, xlen, ylen); // kdx = xlen+ylen;
4104   mov(carry, zr);        // carry = 0;
4105
4106   Label L_done;
4107
4108   movw(xstart, xlen);
4109   subsw(xstart, xstart, 1);
4110   br(Assembler::MI, L_done);
4111
4112   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
4113
4114   Label L_second_loop;
4115   cbzw(kdx, L_second_loop);
4116
4117   Label L_carry;
4118   subw(kdx, kdx, 1);
4119   cbzw(kdx, L_carry);
4120
4121   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4122   lsr(carry, carry, 32);
4123   subw(kdx, kdx, 1);
4124
4125   bind(L_carry);
4126   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4127
4128   // Second and third (nested) loops.
4129 // 4130 // for (int i = xstart-1; i >= 0; i--) { // Second loop 4131 // carry = 0; 4132 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 4133 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 4134 // (z[k] & LONG_MASK) + carry; 4135 // z[k] = (int)product; 4136 // carry = product >>> 32; 4137 // } 4138 // z[i] = (int)carry; 4139 // } 4140 // 4141 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 4142 4143 const Register jdx = tmp1; 4144 4145 bind(L_second_loop); 4146 mov(carry, zr); // carry = 0; 4147 movw(jdx, ylen); // j = ystart+1 4148 4149 subsw(xstart, xstart, 1); // i = xstart-1; 4150 br(Assembler::MI, L_done); 4151 4152 str(z, Address(pre(sp, -4 * wordSize))); 4153 4154 Label L_last_x; 4155 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4156 subsw(xstart, xstart, 1); // i = xstart-1; 4157 br(Assembler::MI, L_last_x); 4158 4159 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4160 ldr(product_hi, Address(rscratch1)); 4161 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4162 4163 Label L_third_loop_prologue; 4164 bind(L_third_loop_prologue); 4165 4166 str(ylen, Address(sp, wordSize)); 4167 stp(x, xstart, Address(sp, 2 * wordSize)); 4168 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4169 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4170 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4171 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4172 4173 addw(tmp3, xlen, 1); 4174 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4175 subsw(tmp3, tmp3, 1); 4176 br(Assembler::MI, L_done); 4177 4178 lsr(carry, carry, 32); 4179 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4180 b(L_second_loop); 4181 4182 // Next infrequent code is moved outside loops. 4183 bind(L_last_x); 4184 ldrw(product_hi, Address(x, 0)); 4185 b(L_third_loop_prologue); 4186 4187 bind(L_done); 4188 } 4189 4190 // Code for BigInteger::mulAdd intrinsic 4191 // out = r0 4192 // in = r1 4193 // offset = r2 (already out.length-offset) 4194 // len = r3 4195 // k = r4 4196 // 4197 // pseudo code from java implementation: 4198 // carry = 0; 4199 // offset = out.length-offset - 1; 4200 // for (int j=len-1; j >= 0; j--) { 4201 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4202 // out[offset--] = (int)product; 4203 // carry = product >>> 32; 4204 // } 4205 // return (int)carry; 4206 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4207 Register len, Register k) { 4208 Label LOOP, END; 4209 // pre-loop 4210 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4211 csel(out, zr, out, Assembler::EQ); 4212 br(Assembler::EQ, END); 4213 add(in, in, len, LSL, 2); // in[j+1] address 4214 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4215 mov(out, zr); // used to keep carry now 4216 BIND(LOOP); 4217 ldrw(rscratch1, Address(pre(in, -4))); 4218 madd(rscratch1, rscratch1, k, out); 4219 ldrw(rscratch2, Address(pre(offset, -4))); 4220 add(rscratch1, rscratch1, rscratch2); 4221 strw(rscratch1, Address(offset)); 4222 lsr(out, rscratch1, 32); 4223 subs(len, len, 1); 4224 br(Assembler::NE, LOOP); 4225 BIND(END); 4226 } 4227 4228 /** 4229 * Emits code to update CRC-32 with a byte value according to constants in table 4230 * 4231 * @param [in,out]crc Register containing the crc. 
4232 * @param [in]val Register containing the byte to fold into the CRC. 4233 * @param [in]table Register containing the table of crc constants. 4234 * 4235 * uint32_t crc; 4236 * val = crc_table[(val ^ crc) & 0xFF]; 4237 * crc = val ^ (crc >> 8); 4238 * 4239 */ 4240 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4241 eor(val, val, crc); 4242 andr(val, val, 0xff); 4243 ldrw(val, Address(table, val, Address::lsl(2))); 4244 eor(crc, val, crc, Assembler::LSR, 8); 4245 } 4246 4247 /** 4248 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 4249 * 4250 * @param [in,out]crc Register containing the crc. 4251 * @param [in]v Register containing the 32-bit to fold into the CRC. 4252 * @param [in]table0 Register containing table 0 of crc constants. 4253 * @param [in]table1 Register containing table 1 of crc constants. 4254 * @param [in]table2 Register containing table 2 of crc constants. 4255 * @param [in]table3 Register containing table 3 of crc constants. 4256 * 4257 * uint32_t crc; 4258 * v = crc ^ v 4259 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 4260 * 4261 */ 4262 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 4263 Register table0, Register table1, Register table2, Register table3, 4264 bool upper) { 4265 eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0); 4266 uxtb(tmp, v); 4267 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 4268 ubfx(tmp, v, 8, 8); 4269 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 4270 eor(crc, crc, tmp); 4271 ubfx(tmp, v, 16, 8); 4272 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 4273 eor(crc, crc, tmp); 4274 ubfx(tmp, v, 24, 8); 4275 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 4276 eor(crc, crc, tmp); 4277 } 4278 4279 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf, 4280 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4281 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4282 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4283 4284 subs(tmp0, len, 384); 4285 mvnw(crc, crc); 4286 br(Assembler::GE, CRC_by128_pre); 4287 BIND(CRC_less128); 4288 subs(len, len, 32); 4289 br(Assembler::GE, CRC_by32_loop); 4290 BIND(CRC_less32); 4291 adds(len, len, 32 - 4); 4292 br(Assembler::GE, CRC_by4_loop); 4293 adds(len, len, 4); 4294 br(Assembler::GT, CRC_by1_loop); 4295 b(L_exit); 4296 4297 BIND(CRC_by32_loop); 4298 ldp(tmp0, tmp1, Address(buf)); 4299 crc32x(crc, crc, tmp0); 4300 ldp(tmp2, tmp3, Address(buf, 16)); 4301 crc32x(crc, crc, tmp1); 4302 add(buf, buf, 32); 4303 crc32x(crc, crc, tmp2); 4304 subs(len, len, 32); 4305 crc32x(crc, crc, tmp3); 4306 br(Assembler::GE, CRC_by32_loop); 4307 cmn(len, (u1)32); 4308 br(Assembler::NE, CRC_less32); 4309 b(L_exit); 4310 4311 BIND(CRC_by4_loop); 4312 ldrw(tmp0, Address(post(buf, 4))); 4313 subs(len, len, 4); 4314 crc32w(crc, crc, tmp0); 4315 br(Assembler::GE, CRC_by4_loop); 4316 adds(len, len, 4); 4317 br(Assembler::LE, L_exit); 4318 BIND(CRC_by1_loop); 4319 ldrb(tmp0, Address(post(buf, 1))); 4320 subs(len, len, 1); 4321 crc32b(crc, crc, tmp0); 4322 br(Assembler::GT, CRC_by1_loop); 4323 b(L_exit); 4324 4325 BIND(CRC_by128_pre); 4326 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4327 4*256*sizeof(juint) + 8*sizeof(juint)); 4328 mov(crc, 0); 4329 crc32x(crc, crc, tmp0); 4330 crc32x(crc, crc, tmp1); 4331 4332 cbnz(len, CRC_less128); 4333 4334 BIND(L_exit); 
4335 mvnw(crc, crc); 4336 } 4337 4338 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4339 Register len, Register tmp0, Register tmp1, Register tmp2, 4340 Register tmp3) { 4341 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4342 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4343 4344 mvnw(crc, crc); 4345 4346 subs(len, len, 128); 4347 br(Assembler::GE, CRC_by64_pre); 4348 BIND(CRC_less64); 4349 adds(len, len, 128-32); 4350 br(Assembler::GE, CRC_by32_loop); 4351 BIND(CRC_less32); 4352 adds(len, len, 32-4); 4353 br(Assembler::GE, CRC_by4_loop); 4354 adds(len, len, 4); 4355 br(Assembler::GT, CRC_by1_loop); 4356 b(L_exit); 4357 4358 BIND(CRC_by32_loop); 4359 ldp(tmp0, tmp1, Address(post(buf, 16))); 4360 subs(len, len, 32); 4361 crc32x(crc, crc, tmp0); 4362 ldr(tmp2, Address(post(buf, 8))); 4363 crc32x(crc, crc, tmp1); 4364 ldr(tmp3, Address(post(buf, 8))); 4365 crc32x(crc, crc, tmp2); 4366 crc32x(crc, crc, tmp3); 4367 br(Assembler::GE, CRC_by32_loop); 4368 cmn(len, (u1)32); 4369 br(Assembler::NE, CRC_less32); 4370 b(L_exit); 4371 4372 BIND(CRC_by4_loop); 4373 ldrw(tmp0, Address(post(buf, 4))); 4374 subs(len, len, 4); 4375 crc32w(crc, crc, tmp0); 4376 br(Assembler::GE, CRC_by4_loop); 4377 adds(len, len, 4); 4378 br(Assembler::LE, L_exit); 4379 BIND(CRC_by1_loop); 4380 ldrb(tmp0, Address(post(buf, 1))); 4381 subs(len, len, 1); 4382 crc32b(crc, crc, tmp0); 4383 br(Assembler::GT, CRC_by1_loop); 4384 b(L_exit); 4385 4386 BIND(CRC_by64_pre); 4387 sub(buf, buf, 8); 4388 ldp(tmp0, tmp1, Address(buf, 8)); 4389 crc32x(crc, crc, tmp0); 4390 ldr(tmp2, Address(buf, 24)); 4391 crc32x(crc, crc, tmp1); 4392 ldr(tmp3, Address(buf, 32)); 4393 crc32x(crc, crc, tmp2); 4394 ldr(tmp0, Address(buf, 40)); 4395 crc32x(crc, crc, tmp3); 4396 ldr(tmp1, Address(buf, 48)); 4397 crc32x(crc, crc, tmp0); 4398 ldr(tmp2, Address(buf, 56)); 4399 crc32x(crc, crc, tmp1); 4400 ldr(tmp3, Address(pre(buf, 64))); 4401 4402 b(CRC_by64_loop); 4403 4404 align(CodeEntryAlignment); 4405 BIND(CRC_by64_loop); 4406 subs(len, len, 64); 4407 crc32x(crc, crc, tmp2); 4408 ldr(tmp0, Address(buf, 8)); 4409 crc32x(crc, crc, tmp3); 4410 ldr(tmp1, Address(buf, 16)); 4411 crc32x(crc, crc, tmp0); 4412 ldr(tmp2, Address(buf, 24)); 4413 crc32x(crc, crc, tmp1); 4414 ldr(tmp3, Address(buf, 32)); 4415 crc32x(crc, crc, tmp2); 4416 ldr(tmp0, Address(buf, 40)); 4417 crc32x(crc, crc, tmp3); 4418 ldr(tmp1, Address(buf, 48)); 4419 crc32x(crc, crc, tmp0); 4420 ldr(tmp2, Address(buf, 56)); 4421 crc32x(crc, crc, tmp1); 4422 ldr(tmp3, Address(pre(buf, 64))); 4423 br(Assembler::GE, CRC_by64_loop); 4424 4425 // post-loop 4426 crc32x(crc, crc, tmp2); 4427 crc32x(crc, crc, tmp3); 4428 4429 sub(len, len, 64); 4430 add(buf, buf, 8); 4431 cmn(len, (u1)128); 4432 br(Assembler::NE, CRC_less64); 4433 BIND(L_exit); 4434 mvnw(crc, crc); 4435 } 4436 4437 /** 4438 * @param crc register containing existing CRC (32-bit) 4439 * @param buf register pointing to input byte buffer (byte*) 4440 * @param len register containing number of bytes 4441 * @param table register that will contain address of CRC table 4442 * @param tmp scratch register 4443 */ 4444 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4445 Register table0, Register table1, Register table2, Register table3, 4446 Register tmp, Register tmp2, Register tmp3) { 4447 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4448 4449 if (UseCryptoPmullForCRC32) { 4450 
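    // Carry-less multiply (PMULL) path: folds 128 bytes of input per iteration.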
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4451 return; 4452 } 4453 4454 if (UseCRC32) { 4455 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4456 return; 4457 } 4458 4459 mvnw(crc, crc); 4460 4461 { 4462 uint64_t offset; 4463 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4464 add(table0, table0, offset); 4465 } 4466 add(table1, table0, 1*256*sizeof(juint)); 4467 add(table2, table0, 2*256*sizeof(juint)); 4468 add(table3, table0, 3*256*sizeof(juint)); 4469 4470 { // Neon code start 4471 cmp(len, (u1)64); 4472 br(Assembler::LT, L_by16); 4473 eor(v16, T16B, v16, v16); 4474 4475 Label L_fold; 4476 4477 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4478 4479 ld1(v0, v1, T2D, post(buf, 32)); 4480 ld1r(v4, T2D, post(tmp, 8)); 4481 ld1r(v5, T2D, post(tmp, 8)); 4482 ld1r(v6, T2D, post(tmp, 8)); 4483 ld1r(v7, T2D, post(tmp, 8)); 4484 mov(v16, S, 0, crc); 4485 4486 eor(v0, T16B, v0, v16); 4487 sub(len, len, 64); 4488 4489 BIND(L_fold); 4490 pmull(v22, T8H, v0, v5, T8B); 4491 pmull(v20, T8H, v0, v7, T8B); 4492 pmull(v23, T8H, v0, v4, T8B); 4493 pmull(v21, T8H, v0, v6, T8B); 4494 4495 pmull2(v18, T8H, v0, v5, T16B); 4496 pmull2(v16, T8H, v0, v7, T16B); 4497 pmull2(v19, T8H, v0, v4, T16B); 4498 pmull2(v17, T8H, v0, v6, T16B); 4499 4500 uzp1(v24, T8H, v20, v22); 4501 uzp2(v25, T8H, v20, v22); 4502 eor(v20, T16B, v24, v25); 4503 4504 uzp1(v26, T8H, v16, v18); 4505 uzp2(v27, T8H, v16, v18); 4506 eor(v16, T16B, v26, v27); 4507 4508 ushll2(v22, T4S, v20, T8H, 8); 4509 ushll(v20, T4S, v20, T4H, 8); 4510 4511 ushll2(v18, T4S, v16, T8H, 8); 4512 ushll(v16, T4S, v16, T4H, 8); 4513 4514 eor(v22, T16B, v23, v22); 4515 eor(v18, T16B, v19, v18); 4516 eor(v20, T16B, v21, v20); 4517 eor(v16, T16B, v17, v16); 4518 4519 uzp1(v17, T2D, v16, v20); 4520 uzp2(v21, T2D, v16, v20); 4521 eor(v17, T16B, v17, v21); 4522 4523 ushll2(v20, T2D, v17, T4S, 16); 4524 ushll(v16, T2D, v17, T2S, 16); 4525 4526 eor(v20, T16B, v20, v22); 4527 eor(v16, T16B, v16, v18); 4528 4529 uzp1(v17, T2D, v20, v16); 4530 uzp2(v21, T2D, v20, v16); 4531 eor(v28, T16B, v17, v21); 4532 4533 pmull(v22, T8H, v1, v5, T8B); 4534 pmull(v20, T8H, v1, v7, T8B); 4535 pmull(v23, T8H, v1, v4, T8B); 4536 pmull(v21, T8H, v1, v6, T8B); 4537 4538 pmull2(v18, T8H, v1, v5, T16B); 4539 pmull2(v16, T8H, v1, v7, T16B); 4540 pmull2(v19, T8H, v1, v4, T16B); 4541 pmull2(v17, T8H, v1, v6, T16B); 4542 4543 ld1(v0, v1, T2D, post(buf, 32)); 4544 4545 uzp1(v24, T8H, v20, v22); 4546 uzp2(v25, T8H, v20, v22); 4547 eor(v20, T16B, v24, v25); 4548 4549 uzp1(v26, T8H, v16, v18); 4550 uzp2(v27, T8H, v16, v18); 4551 eor(v16, T16B, v26, v27); 4552 4553 ushll2(v22, T4S, v20, T8H, 8); 4554 ushll(v20, T4S, v20, T4H, 8); 4555 4556 ushll2(v18, T4S, v16, T8H, 8); 4557 ushll(v16, T4S, v16, T4H, 8); 4558 4559 eor(v22, T16B, v23, v22); 4560 eor(v18, T16B, v19, v18); 4561 eor(v20, T16B, v21, v20); 4562 eor(v16, T16B, v17, v16); 4563 4564 uzp1(v17, T2D, v16, v20); 4565 uzp2(v21, T2D, v16, v20); 4566 eor(v16, T16B, v17, v21); 4567 4568 ushll2(v20, T2D, v16, T4S, 16); 4569 ushll(v16, T2D, v16, T2S, 16); 4570 4571 eor(v20, T16B, v22, v20); 4572 eor(v16, T16B, v16, v18); 4573 4574 uzp1(v17, T2D, v20, v16); 4575 uzp2(v21, T2D, v20, v16); 4576 eor(v20, T16B, v17, v21); 4577 4578 shl(v16, T2D, v28, 1); 4579 shl(v17, T2D, v20, 1); 4580 4581 eor(v0, T16B, v0, v16); 4582 eor(v1, T16B, v1, v17); 4583 4584 subs(len, len, 32); 4585 br(Assembler::GE, L_fold); 4586 4587 mov(crc, 0); 4588 mov(tmp, v0, D, 0); 
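    // Fold the 128-bit accumulators v0 and v1 back into the scalar CRC,
    // one 32-bit half of each 64-bit lane at a time.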
4589 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4590 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4591 mov(tmp, v0, D, 1); 4592 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4593 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4594 mov(tmp, v1, D, 0); 4595 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4596 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4597 mov(tmp, v1, D, 1); 4598 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4599 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4600 4601 add(len, len, 32); 4602 } // Neon code end 4603 4604 BIND(L_by16); 4605 subs(len, len, 16); 4606 br(Assembler::GE, L_by16_loop); 4607 adds(len, len, 16-4); 4608 br(Assembler::GE, L_by4_loop); 4609 adds(len, len, 4); 4610 br(Assembler::GT, L_by1_loop); 4611 b(L_exit); 4612 4613 BIND(L_by4_loop); 4614 ldrw(tmp, Address(post(buf, 4))); 4615 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4616 subs(len, len, 4); 4617 br(Assembler::GE, L_by4_loop); 4618 adds(len, len, 4); 4619 br(Assembler::LE, L_exit); 4620 BIND(L_by1_loop); 4621 subs(len, len, 1); 4622 ldrb(tmp, Address(post(buf, 1))); 4623 update_byte_crc32(crc, tmp, table0); 4624 br(Assembler::GT, L_by1_loop); 4625 b(L_exit); 4626 4627 align(CodeEntryAlignment); 4628 BIND(L_by16_loop); 4629 subs(len, len, 16); 4630 ldp(tmp, tmp3, Address(post(buf, 16))); 4631 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4632 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4633 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4634 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4635 br(Assembler::GE, L_by16_loop); 4636 adds(len, len, 16-4); 4637 br(Assembler::GE, L_by4_loop); 4638 adds(len, len, 4); 4639 br(Assembler::GT, L_by1_loop); 4640 BIND(L_exit); 4641 mvnw(crc, crc); 4642 } 4643 4644 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4645 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4646 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4647 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4648 4649 subs(tmp0, len, 384); 4650 br(Assembler::GE, CRC_by128_pre); 4651 BIND(CRC_less128); 4652 subs(len, len, 32); 4653 br(Assembler::GE, CRC_by32_loop); 4654 BIND(CRC_less32); 4655 adds(len, len, 32 - 4); 4656 br(Assembler::GE, CRC_by4_loop); 4657 adds(len, len, 4); 4658 br(Assembler::GT, CRC_by1_loop); 4659 b(L_exit); 4660 4661 BIND(CRC_by32_loop); 4662 ldp(tmp0, tmp1, Address(buf)); 4663 crc32cx(crc, crc, tmp0); 4664 ldr(tmp2, Address(buf, 16)); 4665 crc32cx(crc, crc, tmp1); 4666 ldr(tmp3, Address(buf, 24)); 4667 crc32cx(crc, crc, tmp2); 4668 add(buf, buf, 32); 4669 subs(len, len, 32); 4670 crc32cx(crc, crc, tmp3); 4671 br(Assembler::GE, CRC_by32_loop); 4672 cmn(len, (u1)32); 4673 br(Assembler::NE, CRC_less32); 4674 b(L_exit); 4675 4676 BIND(CRC_by4_loop); 4677 ldrw(tmp0, Address(post(buf, 4))); 4678 subs(len, len, 4); 4679 crc32cw(crc, crc, tmp0); 4680 br(Assembler::GE, CRC_by4_loop); 4681 adds(len, len, 4); 4682 br(Assembler::LE, L_exit); 4683 BIND(CRC_by1_loop); 4684 ldrb(tmp0, Address(post(buf, 1))); 4685 subs(len, len, 1); 4686 crc32cb(crc, crc, tmp0); 4687 br(Assembler::GT, CRC_by1_loop); 4688 b(L_exit); 4689 
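  // Bulk path for inputs of 384 bytes or more: fold 128 bytes per
  // iteration with PMULL, then finish with the loops above.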
4690 BIND(CRC_by128_pre); 4691 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4692 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4693 mov(crc, 0); 4694 crc32cx(crc, crc, tmp0); 4695 crc32cx(crc, crc, tmp1); 4696 4697 cbnz(len, CRC_less128); 4698 4699 BIND(L_exit); 4700 } 4701 4702 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4703 Register len, Register tmp0, Register tmp1, Register tmp2, 4704 Register tmp3) { 4705 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4706 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4707 4708 subs(len, len, 128); 4709 br(Assembler::GE, CRC_by64_pre); 4710 BIND(CRC_less64); 4711 adds(len, len, 128-32); 4712 br(Assembler::GE, CRC_by32_loop); 4713 BIND(CRC_less32); 4714 adds(len, len, 32-4); 4715 br(Assembler::GE, CRC_by4_loop); 4716 adds(len, len, 4); 4717 br(Assembler::GT, CRC_by1_loop); 4718 b(L_exit); 4719 4720 BIND(CRC_by32_loop); 4721 ldp(tmp0, tmp1, Address(post(buf, 16))); 4722 subs(len, len, 32); 4723 crc32cx(crc, crc, tmp0); 4724 ldr(tmp2, Address(post(buf, 8))); 4725 crc32cx(crc, crc, tmp1); 4726 ldr(tmp3, Address(post(buf, 8))); 4727 crc32cx(crc, crc, tmp2); 4728 crc32cx(crc, crc, tmp3); 4729 br(Assembler::GE, CRC_by32_loop); 4730 cmn(len, (u1)32); 4731 br(Assembler::NE, CRC_less32); 4732 b(L_exit); 4733 4734 BIND(CRC_by4_loop); 4735 ldrw(tmp0, Address(post(buf, 4))); 4736 subs(len, len, 4); 4737 crc32cw(crc, crc, tmp0); 4738 br(Assembler::GE, CRC_by4_loop); 4739 adds(len, len, 4); 4740 br(Assembler::LE, L_exit); 4741 BIND(CRC_by1_loop); 4742 ldrb(tmp0, Address(post(buf, 1))); 4743 subs(len, len, 1); 4744 crc32cb(crc, crc, tmp0); 4745 br(Assembler::GT, CRC_by1_loop); 4746 b(L_exit); 4747 4748 BIND(CRC_by64_pre); 4749 sub(buf, buf, 8); 4750 ldp(tmp0, tmp1, Address(buf, 8)); 4751 crc32cx(crc, crc, tmp0); 4752 ldr(tmp2, Address(buf, 24)); 4753 crc32cx(crc, crc, tmp1); 4754 ldr(tmp3, Address(buf, 32)); 4755 crc32cx(crc, crc, tmp2); 4756 ldr(tmp0, Address(buf, 40)); 4757 crc32cx(crc, crc, tmp3); 4758 ldr(tmp1, Address(buf, 48)); 4759 crc32cx(crc, crc, tmp0); 4760 ldr(tmp2, Address(buf, 56)); 4761 crc32cx(crc, crc, tmp1); 4762 ldr(tmp3, Address(pre(buf, 64))); 4763 4764 b(CRC_by64_loop); 4765 4766 align(CodeEntryAlignment); 4767 BIND(CRC_by64_loop); 4768 subs(len, len, 64); 4769 crc32cx(crc, crc, tmp2); 4770 ldr(tmp0, Address(buf, 8)); 4771 crc32cx(crc, crc, tmp3); 4772 ldr(tmp1, Address(buf, 16)); 4773 crc32cx(crc, crc, tmp0); 4774 ldr(tmp2, Address(buf, 24)); 4775 crc32cx(crc, crc, tmp1); 4776 ldr(tmp3, Address(buf, 32)); 4777 crc32cx(crc, crc, tmp2); 4778 ldr(tmp0, Address(buf, 40)); 4779 crc32cx(crc, crc, tmp3); 4780 ldr(tmp1, Address(buf, 48)); 4781 crc32cx(crc, crc, tmp0); 4782 ldr(tmp2, Address(buf, 56)); 4783 crc32cx(crc, crc, tmp1); 4784 ldr(tmp3, Address(pre(buf, 64))); 4785 br(Assembler::GE, CRC_by64_loop); 4786 4787 // post-loop 4788 crc32cx(crc, crc, tmp2); 4789 crc32cx(crc, crc, tmp3); 4790 4791 sub(len, len, 64); 4792 add(buf, buf, 8); 4793 cmn(len, (u1)128); 4794 br(Assembler::NE, CRC_less64); 4795 BIND(L_exit); 4796 } 4797 4798 /** 4799 * @param crc register containing existing CRC (32-bit) 4800 * @param buf register pointing to input byte buffer (byte*) 4801 * @param len register containing number of bytes 4802 * @param table register that will contain address of CRC table 4803 * @param tmp scratch register 4804 */ 4805 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4806 Register table0, Register table1, Register table2, Register table3, 4807 Register tmp, Register tmp2, Register tmp3) { 4808 if (UseCryptoPmullForCRC32) { 4809 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4810 } else { 4811 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4812 } 4813 } 4814 4815 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4816 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4817 Label CRC_by128_loop; 4818 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4819 4820 sub(len, len, 256); 4821 Register table = tmp0; 4822 { 4823 uint64_t offset; 4824 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4825 add(table, table, offset); 4826 } 4827 add(table, table, table_offset); 4828 4829 // Registers v0..v7 are used as data registers. 4830 // Registers v16..v31 are used as tmp registers. 4831 sub(buf, buf, 0x10); 4832 ldrq(v0, Address(buf, 0x10)); 4833 ldrq(v1, Address(buf, 0x20)); 4834 ldrq(v2, Address(buf, 0x30)); 4835 ldrq(v3, Address(buf, 0x40)); 4836 ldrq(v4, Address(buf, 0x50)); 4837 ldrq(v5, Address(buf, 0x60)); 4838 ldrq(v6, Address(buf, 0x70)); 4839 ldrq(v7, Address(pre(buf, 0x80))); 4840 4841 movi(v31, T4S, 0); 4842 mov(v31, S, 0, crc); 4843 eor(v0, T16B, v0, v31); 4844 4845 // Register v16 contains constants from the crc table. 4846 ldrq(v16, Address(table)); 4847 b(CRC_by128_loop); 4848 4849 align(OptoLoopAlignment); 4850 BIND(CRC_by128_loop); 4851 pmull (v17, T1Q, v0, v16, T1D); 4852 pmull2(v18, T1Q, v0, v16, T2D); 4853 ldrq(v0, Address(buf, 0x10)); 4854 eor3(v0, T16B, v17, v18, v0); 4855 4856 pmull (v19, T1Q, v1, v16, T1D); 4857 pmull2(v20, T1Q, v1, v16, T2D); 4858 ldrq(v1, Address(buf, 0x20)); 4859 eor3(v1, T16B, v19, v20, v1); 4860 4861 pmull (v21, T1Q, v2, v16, T1D); 4862 pmull2(v22, T1Q, v2, v16, T2D); 4863 ldrq(v2, Address(buf, 0x30)); 4864 eor3(v2, T16B, v21, v22, v2); 4865 4866 pmull (v23, T1Q, v3, v16, T1D); 4867 pmull2(v24, T1Q, v3, v16, T2D); 4868 ldrq(v3, Address(buf, 0x40)); 4869 eor3(v3, T16B, v23, v24, v3); 4870 4871 pmull (v25, T1Q, v4, v16, T1D); 4872 pmull2(v26, T1Q, v4, v16, T2D); 4873 ldrq(v4, Address(buf, 0x50)); 4874 eor3(v4, T16B, v25, v26, v4); 4875 4876 pmull (v27, T1Q, v5, v16, T1D); 4877 pmull2(v28, T1Q, v5, v16, T2D); 4878 ldrq(v5, Address(buf, 0x60)); 4879 eor3(v5, T16B, v27, v28, v5); 4880 4881 pmull (v29, T1Q, v6, v16, T1D); 4882 pmull2(v30, T1Q, v6, v16, T2D); 4883 ldrq(v6, Address(buf, 0x70)); 4884 eor3(v6, T16B, v29, v30, v6); 4885 4886 // Reuse registers v23, v24. 4887 // Using them won't block the first instruction of the next iteration. 4888 pmull (v23, T1Q, v7, v16, T1D); 4889 pmull2(v24, T1Q, v7, v16, T2D); 4890 ldrq(v7, Address(pre(buf, 0x80))); 4891 eor3(v7, T16B, v23, v24, v7); 4892 4893 subs(len, len, 0x80); 4894 br(Assembler::GE, CRC_by128_loop); 4895 4896 // fold into 512 bits 4897 // Use v31 for constants because v16 can be still in use. 
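  // Reduce the 1024-bit state v0..v7 to 512 bits: each of v0..v3 is
  // carry-less multiplied by the constant at table+0x10 and eor3-ed
  // with the corresponding register from v4..v7.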
4898 ldrq(v31, Address(table, 0x10)); 4899 4900 pmull (v17, T1Q, v0, v31, T1D); 4901 pmull2(v18, T1Q, v0, v31, T2D); 4902 eor3(v0, T16B, v17, v18, v4); 4903 4904 pmull (v19, T1Q, v1, v31, T1D); 4905 pmull2(v20, T1Q, v1, v31, T2D); 4906 eor3(v1, T16B, v19, v20, v5); 4907 4908 pmull (v21, T1Q, v2, v31, T1D); 4909 pmull2(v22, T1Q, v2, v31, T2D); 4910 eor3(v2, T16B, v21, v22, v6); 4911 4912 pmull (v23, T1Q, v3, v31, T1D); 4913 pmull2(v24, T1Q, v3, v31, T2D); 4914 eor3(v3, T16B, v23, v24, v7); 4915 4916 // fold into 128 bits 4917 // Use v17 for constants because v31 can be still in use. 4918 ldrq(v17, Address(table, 0x20)); 4919 pmull (v25, T1Q, v0, v17, T1D); 4920 pmull2(v26, T1Q, v0, v17, T2D); 4921 eor3(v3, T16B, v3, v25, v26); 4922 4923 // Use v18 for constants because v17 can be still in use. 4924 ldrq(v18, Address(table, 0x30)); 4925 pmull (v27, T1Q, v1, v18, T1D); 4926 pmull2(v28, T1Q, v1, v18, T2D); 4927 eor3(v3, T16B, v3, v27, v28); 4928 4929 // Use v19 for constants because v18 can be still in use. 4930 ldrq(v19, Address(table, 0x40)); 4931 pmull (v29, T1Q, v2, v19, T1D); 4932 pmull2(v30, T1Q, v2, v19, T2D); 4933 eor3(v0, T16B, v3, v29, v30); 4934 4935 add(len, len, 0x80); 4936 add(buf, buf, 0x10); 4937 4938 mov(tmp0, v0, D, 0); 4939 mov(tmp1, v0, D, 1); 4940 } 4941 4942 SkipIfEqual::SkipIfEqual( 4943 MacroAssembler* masm, const bool* flag_addr, bool value) { 4944 _masm = masm; 4945 uint64_t offset; 4946 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 4947 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 4948 if (value) { 4949 _masm->cbnzw(rscratch1, _label); 4950 } else { 4951 _masm->cbzw(rscratch1, _label); 4952 } 4953 } 4954 4955 SkipIfEqual::~SkipIfEqual() { 4956 _masm->bind(_label); 4957 } 4958 4959 void MacroAssembler::addptr(const Address &dst, int32_t src) { 4960 Address adr; 4961 switch(dst.getMode()) { 4962 case Address::base_plus_offset: 4963 // This is the expected mode, although we allow all the other 4964 // forms below. 
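    // form_address encodes base+offset directly when the offset fits the
    // addressing mode, and otherwise materializes the address in rscratch2.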
4965 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 4966 break; 4967 default: 4968 lea(rscratch2, dst); 4969 adr = Address(rscratch2); 4970 break; 4971 } 4972 ldr(rscratch1, adr); 4973 add(rscratch1, rscratch1, src); 4974 str(rscratch1, adr); 4975 } 4976 4977 void MacroAssembler::cmpptr(Register src1, Address src2) { 4978 uint64_t offset; 4979 adrp(rscratch1, src2, offset); 4980 ldr(rscratch1, Address(rscratch1, offset)); 4981 cmp(src1, rscratch1); 4982 } 4983 4984 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 4985 cmp(obj1, obj2); 4986 } 4987 4988 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 4989 load_method_holder(rresult, rmethod); 4990 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 4991 } 4992 4993 void MacroAssembler::load_method_holder(Register holder, Register method) { 4994 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 4995 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 4996 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 4997 } 4998 4999 void MacroAssembler::load_metadata(Register dst, Register src) { 5000 if (UseCompressedClassPointers) { 5001 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5002 } else { 5003 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5004 } 5005 } 5006 5007 void MacroAssembler::load_klass(Register dst, Register src) { 5008 if (UseCompressedClassPointers) { 5009 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5010 decode_klass_not_null(dst); 5011 } else { 5012 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5013 } 5014 } 5015 5016 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 5017 if (RestoreMXCSROnJNICalls) { 5018 Label OK; 5019 get_fpcr(tmp1); 5020 mov(tmp2, tmp1); 5021 // Set FPCR to the state we need. We do want Round to Nearest. We 5022 // don't want non-IEEE rounding modes or floating-point traps. 5023 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 5024 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 5025 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 5026 eor(tmp2, tmp1, tmp2); 5027 cbz(tmp2, OK); // Only reset FPCR if it's wrong 5028 set_fpcr(tmp1); 5029 bind(OK); 5030 } 5031 } 5032 5033 // ((OopHandle)result).resolve(); 5034 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 5035 // OopHandle::resolve is an indirection. 5036 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 5037 } 5038 5039 // ((WeakHandle)result).resolve(); 5040 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 5041 assert_different_registers(result, tmp1, tmp2); 5042 Label resolved; 5043 5044 // A null weak handle resolves to null. 5045 cbz(result, resolved); 5046 5047 // Only 64 bit platforms support GCs that require a tmp register 5048 // WeakHandle::resolve is an indirection like jweak. 
5049   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5050                  result, Address(result), tmp1, tmp2);
5051   bind(resolved);
5052 }
5053
5054 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
5055   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5056   ldr(dst, Address(method, Method::const_offset()));
5057   ldr(dst, Address(dst, ConstMethod::constants_offset()));
5058   ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
5059   ldr(dst, Address(dst, mirror_offset));
5060   resolve_oop_handle(dst, tmp1, tmp2);
5061 }
5062
5063 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
5064   if (UseCompressedClassPointers) {
5065     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
5066     if (CompressedKlassPointers::base() == nullptr) {
5067       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
5068       return;
5069     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
5070                && CompressedKlassPointers::shift() == 0) {
5071       // Only the bottom 32 bits matter
5072       cmpw(trial_klass, tmp);
5073       return;
5074     }
5075     decode_klass_not_null(tmp);
5076   } else {
5077     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
5078   }
5079   cmp(trial_klass, tmp);
5080 }
5081
5082 void MacroAssembler::load_prototype_header(Register dst, Register src) {
5083   load_klass(dst, src);
5084   ldr(dst, Address(dst, Klass::prototype_header_offset()));
5085 }
5086
5087 void MacroAssembler::store_klass(Register dst, Register src) {
5088   // FIXME: Should this be a store release? Concurrent GCs assume the
5089   // klass length is valid if the klass field is not null.
5090   if (UseCompressedClassPointers) {
5091     encode_klass_not_null(src);
5092     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5093   } else {
5094     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5095   }
5096 }
5097
5098 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5099   if (UseCompressedClassPointers) {
5100     // Store to klass gap in destination
5101     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
5102   }
5103 }
5104
5105 // Algorithm must match CompressedOops::encode.
5106 void MacroAssembler::encode_heap_oop(Register d, Register s) {
5107 #ifdef ASSERT
5108   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5109 #endif
5110   verify_oop_msg(s, "broken oop in encode_heap_oop");
5111   if (CompressedOops::base() == nullptr) {
5112     if (CompressedOops::shift() != 0) {
5113       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5114       lsr(d, s, LogMinObjAlignmentInBytes);
5115     } else {
5116       mov(d, s);
5117     }
5118   } else {
5119     subs(d, s, rheapbase);
5120     csel(d, d, zr, Assembler::HS);
5121     lsr(d, d, LogMinObjAlignmentInBytes);
5122
5123     /* Old algorithm: is this any worse?
5124 Label nonnull; 5125 cbnz(r, nonnull); 5126 sub(r, r, rheapbase); 5127 bind(nonnull); 5128 lsr(r, r, LogMinObjAlignmentInBytes); 5129 */ 5130 } 5131 } 5132 5133 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5134 #ifdef ASSERT 5135 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5136 if (CheckCompressedOops) { 5137 Label ok; 5138 cbnz(r, ok); 5139 stop("null oop passed to encode_heap_oop_not_null"); 5140 bind(ok); 5141 } 5142 #endif 5143 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5144 if (CompressedOops::base() != nullptr) { 5145 sub(r, r, rheapbase); 5146 } 5147 if (CompressedOops::shift() != 0) { 5148 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5149 lsr(r, r, LogMinObjAlignmentInBytes); 5150 } 5151 } 5152 5153 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5154 #ifdef ASSERT 5155 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5156 if (CheckCompressedOops) { 5157 Label ok; 5158 cbnz(src, ok); 5159 stop("null oop passed to encode_heap_oop_not_null2"); 5160 bind(ok); 5161 } 5162 #endif 5163 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5164 5165 Register data = src; 5166 if (CompressedOops::base() != nullptr) { 5167 sub(dst, src, rheapbase); 5168 data = dst; 5169 } 5170 if (CompressedOops::shift() != 0) { 5171 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5172 lsr(dst, data, LogMinObjAlignmentInBytes); 5173 data = dst; 5174 } 5175 if (data == src) 5176 mov(dst, src); 5177 } 5178 5179 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5180 #ifdef ASSERT 5181 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5182 #endif 5183 if (CompressedOops::base() == nullptr) { 5184 if (CompressedOops::shift() != 0) { 5185 lsl(d, s, CompressedOops::shift()); 5186 } else if (d != s) { 5187 mov(d, s); 5188 } 5189 } else { 5190 Label done; 5191 if (d != s) 5192 mov(d, s); 5193 cbz(s, done); 5194 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5195 bind(done); 5196 } 5197 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5198 } 5199 5200 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5201 assert (UseCompressedOops, "should only be used for compressed headers"); 5202 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5203 // Cannot assert, unverified entry point counts instructions (see .ad file) 5204 // vtableStubs also counts instructions in pd_code_size_limit. 5205 // Also do not verify_oop as this is called by verify_oop. 5206 if (CompressedOops::shift() != 0) { 5207 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5208 if (CompressedOops::base() != nullptr) { 5209 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5210 } else { 5211 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5212 } 5213 } else { 5214 assert (CompressedOops::base() == nullptr, "sanity"); 5215 } 5216 } 5217 5218 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5219 assert (UseCompressedOops, "should only be used for compressed headers"); 5220 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5221 // Cannot assert, unverified entry point counts instructions (see .ad file) 5222 // vtableStubs also counts instructions in pd_code_size_limit. 5223 // Also do not verify_oop as this is called by verify_oop. 
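  // In effect (sketch), decoding a non-null narrow oop computes:
  //
  //   oop decode_not_null(narrowOop n) {
  //     return (oop)(base + ((uintptr_t)n << shift));   // base may be zero
  //   }
  //
  // With a zero shift the base must also be null (asserted below), so a
  // plain register move suffices.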
5224 if (CompressedOops::shift() != 0) { 5225 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5226 if (CompressedOops::base() != nullptr) { 5227 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5228 } else { 5229 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5230 } 5231 } else { 5232 assert (CompressedOops::base() == nullptr, "sanity"); 5233 if (dst != src) { 5234 mov(dst, src); 5235 } 5236 } 5237 } 5238 5239 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5240 5241 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5242 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5243 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5244 5245 if (_klass_decode_mode != KlassDecodeNone) { 5246 return _klass_decode_mode; 5247 } 5248 5249 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() 5250 || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); 5251 5252 if (CompressedKlassPointers::base() == nullptr) { 5253 return (_klass_decode_mode = KlassDecodeZero); 5254 } 5255 5256 if (operand_valid_for_logical_immediate( 5257 /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { 5258 const size_t range = CompressedKlassPointers::klass_range_end() - CompressedKlassPointers::base(); 5259 const uint64_t range_mask = (1ULL << log2i(range)) - 1; 5260 if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { 5261 return (_klass_decode_mode = KlassDecodeXor); 5262 } 5263 } 5264 5265 const uint64_t shifted_base = 5266 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5267 guarantee((shifted_base & 0xffff0000ffffffff) == 0, 5268 "compressed class base bad alignment"); 5269 5270 return (_klass_decode_mode = KlassDecodeMovk); 5271 } 5272 5273 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5274 switch (klass_decode_mode()) { 5275 case KlassDecodeZero: 5276 if (CompressedKlassPointers::shift() != 0) { 5277 lsr(dst, src, LogKlassAlignmentInBytes); 5278 } else { 5279 if (dst != src) mov(dst, src); 5280 } 5281 break; 5282 5283 case KlassDecodeXor: 5284 if (CompressedKlassPointers::shift() != 0) { 5285 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5286 lsr(dst, dst, LogKlassAlignmentInBytes); 5287 } else { 5288 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5289 } 5290 break; 5291 5292 case KlassDecodeMovk: 5293 if (CompressedKlassPointers::shift() != 0) { 5294 ubfx(dst, src, LogKlassAlignmentInBytes, 32); 5295 } else { 5296 movw(dst, src); 5297 } 5298 break; 5299 5300 case KlassDecodeNone: 5301 ShouldNotReachHere(); 5302 break; 5303 } 5304 } 5305 5306 void MacroAssembler::encode_klass_not_null(Register r) { 5307 encode_klass_not_null(r, r); 5308 } 5309 5310 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5311 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5312 5313 switch (klass_decode_mode()) { 5314 case KlassDecodeZero: 5315 if (CompressedKlassPointers::shift() != 0) { 5316 lsl(dst, src, LogKlassAlignmentInBytes); 5317 } else { 5318 if (dst != src) mov(dst, src); 5319 } 5320 break; 5321 5322 case KlassDecodeXor: 5323 if (CompressedKlassPointers::shift() != 0) { 5324 lsl(dst, src, LogKlassAlignmentInBytes); 5325 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5326 } else { 5327 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5328 } 5329 break; 5330 5331 case 
KlassDecodeMovk: { 5332 const uint64_t shifted_base = 5333 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5334 5335 if (dst != src) movw(dst, src); 5336 movk(dst, shifted_base >> 32, 32); 5337 5338 if (CompressedKlassPointers::shift() != 0) { 5339 lsl(dst, dst, LogKlassAlignmentInBytes); 5340 } 5341 5342 break; 5343 } 5344 5345 case KlassDecodeNone: 5346 ShouldNotReachHere(); 5347 break; 5348 } 5349 } 5350 5351 void MacroAssembler::decode_klass_not_null(Register r) { 5352 decode_klass_not_null(r, r); 5353 } 5354 5355 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5356 #ifdef ASSERT 5357 { 5358 ThreadInVMfromUnknown tiv; 5359 assert (UseCompressedOops, "should only be used for compressed oops"); 5360 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5361 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5362 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5363 } 5364 #endif 5365 int oop_index = oop_recorder()->find_index(obj); 5366 InstructionMark im(this); 5367 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5368 code_section()->relocate(inst_mark(), rspec); 5369 movz(dst, 0xDEAD, 16); 5370 movk(dst, 0xBEEF); 5371 } 5372 5373 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5374 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5375 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5376 int index = oop_recorder()->find_index(k); 5377 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5378 5379 InstructionMark im(this); 5380 RelocationHolder rspec = metadata_Relocation::spec(index); 5381 code_section()->relocate(inst_mark(), rspec); 5382 narrowKlass nk = CompressedKlassPointers::encode(k); 5383 movz(dst, (nk >> 16), 16); 5384 movk(dst, nk & 0xffff); 5385 } 5386 5387 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5388 Register dst, Address src, 5389 Register tmp1, Register tmp2) { 5390 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5391 decorators = AccessInternal::decorator_fixup(decorators, type); 5392 bool as_raw = (decorators & AS_RAW) != 0; 5393 if (as_raw) { 5394 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5395 } else { 5396 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5397 } 5398 } 5399 5400 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5401 Address dst, Register val, 5402 Register tmp1, Register tmp2, Register tmp3) { 5403 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5404 decorators = AccessInternal::decorator_fixup(decorators, type); 5405 bool as_raw = (decorators & AS_RAW) != 0; 5406 if (as_raw) { 5407 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5408 } else { 5409 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5410 } 5411 } 5412 5413 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, 5414 Register inline_klass) { 5415 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5416 bs->value_copy(this, decorators, src, dst, inline_klass); 5417 } 5418 5419 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst, 5420 Register inline_layout_info) { 5421 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5422 
bs->flat_field_copy(this, decorators, src, dst, inline_layout_info); 5423 } 5424 5425 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 5426 ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 5427 ldrw(offset, Address(offset, InlineKlass::first_field_offset_offset())); 5428 } 5429 5430 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 5431 // ((address) (void*) o) + vk->first_field_offset(); 5432 Register offset = (data == oop) ? rscratch1 : data; 5433 first_field_offset(inline_klass, offset); 5434 if (data == oop) { 5435 add(data, data, offset); 5436 } else { 5437 lea(data, Address(oop, offset)); 5438 } 5439 } 5440 5441 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 5442 Register index, Register data) { 5443 assert_different_registers(array, array_klass, index); 5444 assert_different_registers(rscratch1, array, index); 5445 5446 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 5447 ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset())); 5448 5449 // Klass::layout_helper_log2_element_size(lh) 5450 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 5451 lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift); 5452 andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask); 5453 lslv(index, index, rscratch1); 5454 5455 add(data, array, index); 5456 add(data, data, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT)); 5457 } 5458 5459 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5460 Register tmp2, DecoratorSet decorators) { 5461 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5462 } 5463 5464 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5465 Register tmp2, DecoratorSet decorators) { 5466 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5467 } 5468 5469 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5470 Register tmp2, Register tmp3, DecoratorSet decorators) { 5471 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5472 } 5473 5474 // Used for storing nulls. 5475 void MacroAssembler::store_heap_oop_null(Address dst) { 5476 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5477 } 5478 5479 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5480 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5481 int index = oop_recorder()->allocate_metadata_index(obj); 5482 RelocationHolder rspec = metadata_Relocation::spec(index); 5483 return Address((address)obj, rspec); 5484 } 5485 5486 // Move an oop into a register. 
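// Note: depending on whether the GC's barrier-set assembler supports
// patching of instruction-embedded immediates, the oop below is either
// materialized with a patchable mov-immediate sequence or loaded from a
// nearby in-code constant slot (see the two branches in the body).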
5487 void MacroAssembler::movoop(Register dst, jobject obj) { 5488 int oop_index; 5489 if (obj == nullptr) { 5490 oop_index = oop_recorder()->allocate_oop_index(obj); 5491 } else { 5492 #ifdef ASSERT 5493 { 5494 ThreadInVMfromUnknown tiv; 5495 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5496 } 5497 #endif 5498 oop_index = oop_recorder()->find_index(obj); 5499 } 5500 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5501 5502 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5503 mov(dst, Address((address)obj, rspec)); 5504 } else { 5505 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5506 ldr_constant(dst, Address(dummy, rspec)); 5507 } 5508 5509 } 5510 5511 // Move a metadata address into a register. 5512 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5513 int oop_index; 5514 if (obj == nullptr) { 5515 oop_index = oop_recorder()->allocate_metadata_index(obj); 5516 } else { 5517 oop_index = oop_recorder()->find_index(obj); 5518 } 5519 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5520 mov(dst, Address((address)obj, rspec)); 5521 } 5522 5523 Address MacroAssembler::constant_oop_address(jobject obj) { 5524 #ifdef ASSERT 5525 { 5526 ThreadInVMfromUnknown tiv; 5527 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5528 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5529 } 5530 #endif 5531 int oop_index = oop_recorder()->find_index(obj); 5532 return Address((address)obj, oop_Relocation::spec(oop_index)); 5533 } 5534 5535 // Object / value buffer allocation... 5536 void MacroAssembler::allocate_instance(Register klass, Register new_obj, 5537 Register t1, Register t2, 5538 bool clear_fields, Label& alloc_failed) 5539 { 5540 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop; 5541 Register layout_size = t1; 5542 assert(new_obj == r0, "needs to be r0"); 5543 assert_different_registers(klass, new_obj, t1, t2); 5544 5545 // get instance_size in InstanceKlass (scaled to a count of bytes) 5546 ldrw(layout_size, Address(klass, Klass::layout_helper_offset())); 5547 // test to see if it is malformed in some way 5548 tst(layout_size, Klass::_lh_instance_slow_path_bit); 5549 br(Assembler::NE, slow_case_no_pop); 5550 5551 // Allocate the instance: 5552 // If TLAB is enabled: 5553 // Try to allocate in the TLAB. 5554 // If fails, go to the slow path. 5555 // Initialize the allocation. 5556 // Exit. 5557 // 5558 // Go to slow path. 5559 5560 if (UseTLAB) { 5561 push(klass); 5562 tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case); 5563 if (ZeroTLAB || (!clear_fields)) { 5564 // the fields have been already cleared 5565 b(initialize_header); 5566 } else { 5567 // initialize both the header and fields 5568 b(initialize_object); 5569 } 5570 5571 if (clear_fields) { 5572 // The object is initialized before the header. If the object size is 5573 // zero, go directly to the header initialization. 5574 bind(initialize_object); 5575 subs(layout_size, layout_size, sizeof(oopDesc)); 5576 br(Assembler::EQ, initialize_header); 5577 5578 // Initialize topmost object field, divide size by 8, check if odd and 5579 // test if zero. 
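      // What follows is, in effect, this zeroing loop (a sketch; 'words' is
      // the instance size minus the header, in 8-byte units):
      //
      //   do {
      //     ((HeapWord*)new_obj)[header_words + --words] = 0;  // top down
      //   } while (words != 0);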
5580 5581 #ifdef ASSERT 5582 // make sure instance_size was multiple of 8 5583 Label L; 5584 tst(layout_size, 7); 5585 br(Assembler::EQ, L); 5586 stop("object size is not multiple of 8 - adjust this code"); 5587 bind(L); 5588 // must be > 0, no extra check needed here 5589 #endif 5590 5591 lsr(layout_size, layout_size, LogBytesPerLong); 5592 5593 // initialize remaining object fields: instance_size was a multiple of 8 5594 { 5595 Label loop; 5596 Register base = t2; 5597 5598 bind(loop); 5599 add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong); 5600 str(zr, Address(rscratch1, sizeof(oopDesc) - 1*oopSize)); 5601 subs(layout_size, layout_size, 1); 5602 br(Assembler::NE, loop); 5603 } 5604 } // clear_fields 5605 5606 // initialize object header only. 5607 bind(initialize_header); 5608 pop(klass); 5609 Register mark_word = t2; 5610 ldr(mark_word, Address(klass, Klass::prototype_header_offset())); 5611 str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes ())); 5612 store_klass_gap(new_obj, zr); // zero klass gap for compressed oops 5613 mov(t2, klass); // preserve klass 5614 store_klass(new_obj, t2); // src klass reg is potentially compressed 5615 5616 // TODO: Valhalla removed SharedRuntime::dtrace_object_alloc from here ? 5617 5618 b(done); 5619 } 5620 5621 if (UseTLAB) { 5622 bind(slow_case); 5623 pop(klass); 5624 } 5625 bind(slow_case_no_pop); 5626 b(alloc_failed); 5627 5628 bind(done); 5629 } 5630 5631 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 5632 void MacroAssembler::tlab_allocate(Register obj, 5633 Register var_size_in_bytes, 5634 int con_size_in_bytes, 5635 Register t1, 5636 Register t2, 5637 Label& slow_case) { 5638 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5639 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5640 } 5641 5642 void MacroAssembler::verify_tlab() { 5643 #ifdef ASSERT 5644 if (UseTLAB && VerifyOops) { 5645 Label next, ok; 5646 5647 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5648 5649 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5650 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5651 cmp(rscratch2, rscratch1); 5652 br(Assembler::HS, next); 5653 STOP("assert(top >= start)"); 5654 should_not_reach_here(); 5655 5656 bind(next); 5657 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5658 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5659 cmp(rscratch2, rscratch1); 5660 br(Assembler::HS, ok); 5661 STOP("assert(top <= end)"); 5662 should_not_reach_here(); 5663 5664 bind(ok); 5665 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 5666 } 5667 #endif 5668 } 5669 5670 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) { 5671 inline_layout_info(holder_klass, index, inline_klass); 5672 ldr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset())); 5673 } 5674 5675 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) { 5676 assert_different_registers(holder_klass, index, layout_info); 5677 InlineLayoutInfo array[2]; 5678 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements 5679 if (is_power_of_2(size)) { 5680 lsl(index, index, log2i_exact(size)); // Scale index by power of 2 5681 } else { 5682 mov(layout_info, size); 5683 mul(index, index, layout_info); // Scale the index to be 
the entry index * array_element_size
5684   }
5685   ldr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
5686   add(layout_info, layout_info, Array<InlineLayoutInfo>::base_offset_in_bytes());
5687   lea(layout_info, Address(layout_info, index));
5688 }
5689
5690 // Writes to successive stack pages until the given offset is reached, checking
5691 // for stack overflow + shadow pages. This clobbers tmp.
5692 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5693   assert_different_registers(tmp, size, rscratch1);
5694   mov(tmp, sp);
5695   // Bang stack for total size given plus shadow page size.
5696   // Bang one page at a time because large size can bang beyond yellow and
5697   // red zones.
5698   Label loop;
5699   mov(rscratch1, (int)os::vm_page_size());
5700   bind(loop);
5701   lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5702   subsw(size, size, rscratch1);
5703   str(size, Address(tmp));
5704   br(Assembler::GT, loop);
5705
5706   // Bang down shadow pages too.
5707   // At this point, (tmp-0) is the last address touched, so don't
5708   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
5709   // was post-decremented.)  Skip this address by starting at i=1, and
5710   // touch a few more pages below.  N.B.  It is important to touch all
5711   // the way down to and including i=StackShadowPages.
5712   for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
5713     // this could be any sized move, but it can serve as a debugging crumb,
5714     // so the bigger the better.
5715     lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5716     str(size, Address(tmp));
5717   }
5718 }
5719
5720 // Move the address of the polling page into dest.
5721 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
5722   ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
5723 }
5724
5725 // Read the polling page.  The address of the polling page must
5726 // already be in r.
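// The poll is a load from a thread-local page that the VM arms (points at
// a protected page) when a safepoint or handshake is requested, so the
// ldrw below traps and the signal handler uses the recorded relocation to
// recognize the poll. (A sketch of the mechanism; the authoritative logic
// lives in the safepoint code.)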
5727 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5728 address mark; 5729 { 5730 InstructionMark im(this); 5731 code_section()->relocate(inst_mark(), rtype); 5732 ldrw(zr, Address(r, 0)); 5733 mark = inst_mark(); 5734 } 5735 verify_cross_modify_fence_not_required(); 5736 return mark; 5737 } 5738 5739 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5740 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5741 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5742 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5743 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5744 int64_t offset_low = dest_page - low_page; 5745 int64_t offset_high = dest_page - high_page; 5746 5747 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5748 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5749 5750 InstructionMark im(this); 5751 code_section()->relocate(inst_mark(), dest.rspec()); 5752 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5753 // the code cache so that if it is relocated we know it will still reach 5754 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5755 _adrp(reg1, dest.target()); 5756 } else { 5757 uint64_t target = (uint64_t)dest.target(); 5758 uint64_t adrp_target 5759 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5760 5761 _adrp(reg1, (address)adrp_target); 5762 movk(reg1, target >> 32, 32); 5763 } 5764 byte_offset = (uint64_t)dest.target() & 0xfff; 5765 } 5766 5767 void MacroAssembler::load_byte_map_base(Register reg) { 5768 CardTable::CardValue* byte_map_base = 5769 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5770 5771 // Strictly speaking the byte_map_base isn't an address at all, and it might 5772 // even be negative. It is thus materialised as a constant. 
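  // For reference, the post-write card mark this base supports is (sketch):
  //
  //   byte_map_base[(uintptr_t)addr >> card_shift] = dirty;
  //
  // byte_map_base is pre-biased by the heap start so the shifted heap
  // address can index it directly, which is why it may lie outside the
  // process address space.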
5773 mov(reg, (uint64_t)byte_map_base); 5774 } 5775 5776 void MacroAssembler::build_frame(int framesize) { 5777 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5778 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5779 protect_return_address(); 5780 if (framesize < ((1 << 9) + 2 * wordSize)) { 5781 sub(sp, sp, framesize); 5782 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5783 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 5784 } else { 5785 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 5786 if (PreserveFramePointer) mov(rfp, sp); 5787 if (framesize < ((1 << 12) + 2 * wordSize)) 5788 sub(sp, sp, framesize - 2 * wordSize); 5789 else { 5790 mov(rscratch1, framesize - 2 * wordSize); 5791 sub(sp, sp, rscratch1); 5792 } 5793 } 5794 verify_cross_modify_fence_not_required(); 5795 } 5796 5797 void MacroAssembler::remove_frame(int framesize) { 5798 assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR"); 5799 assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment"); 5800 if (framesize < ((1 << 9) + 2 * wordSize)) { 5801 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 5802 add(sp, sp, framesize); 5803 } else { 5804 if (framesize < ((1 << 12) + 2 * wordSize)) 5805 add(sp, sp, framesize - 2 * wordSize); 5806 else { 5807 mov(rscratch1, framesize - 2 * wordSize); 5808 add(sp, sp, rscratch1); 5809 } 5810 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 5811 } 5812 authenticate_return_address(); 5813 } 5814 5815 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) { 5816 if (needs_stack_repair) { 5817 // Remove the extension of the caller's frame used for inline type unpacking 5818 // 5819 // Right now the stack looks like this: 5820 // 5821 // | Arguments from caller | 5822 // |---------------------------| <-- caller's SP 5823 // | Saved LR #1 | 5824 // | Saved FP #1 | 5825 // |---------------------------| 5826 // | Extension space for | 5827 // | inline arg (un)packing | 5828 // |---------------------------| <-- start of this method's frame 5829 // | Saved LR #2 | 5830 // | Saved FP #2 | 5831 // |---------------------------| <-- FP 5832 // | sp_inc | 5833 // | method locals | 5834 // |---------------------------| <-- SP 5835 // 5836 // There are two copies of FP and LR on the stack. They will be identical 5837 // unless the caller has been deoptimized, in which case LR #1 will be patched 5838 // to point at the deopt blob, and LR #2 will still point into the old method. 5839 // 5840 // The sp_inc stack slot holds the total size of the frame including the 5841 // extension space minus two words for the saved FP and LR. 
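    // Worked example (assuming an 8-byte wordSize): with
    // initial_framesize = 6 words, sp_inc lives at sp + 3 * wordSize,
    // i.e. in the slot immediately below the saved FP/LR pair at the top
    // of the fixed frame.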
5842
5843     int sp_inc_offset = initial_framesize - 3 * wordSize; // Immediately below saved LR and FP
5844
5845     ldr(rscratch1, Address(sp, sp_inc_offset));
5846     add(sp, sp, rscratch1);
5847     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
5848   } else {
5849     remove_frame(initial_framesize);
5850   }
5851 }
5852
5853 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
5854   int real_frame_size = frame_size + sp_inc;
5855   assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
5856   assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
5857   assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
5858
5859   int sp_inc_offset = frame_size - 3 * wordSize; // Immediately below saved LR and FP
5860
5861   // Subtract two words for the saved FP and LR as these will be popped
5862   // separately. See remove_frame above.
5863   mov(rscratch1, real_frame_size - 2*wordSize);
5864   str(rscratch1, Address(sp, sp_inc_offset));
5865 }
5866
5867 // This method counts the leading positive bytes (highest bit not set) in the provided byte array.
5868 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
5869   // The simple and most common case (a small aligned array that is not at the
5870   // end of a memory page) is handled here. All other cases are in the stub.
5871   Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
5872   const uint64_t UPPER_BIT_MASK=0x8080808080808080;
5873   assert_different_registers(ary1, len, result);
5874
5875   mov(result, len);
5876   cmpw(len, 0);
5877   br(LE, DONE);
5878   cmpw(len, 4 * wordSize);
5879   br(GE, STUB_LONG); // if size > 32 then go to the stub
5880
5881   int shift = 64 - exact_log2(os::vm_page_size());
5882   lsl(rscratch1, ary1, shift);
5883   mov(rscratch2, (size_t)(4 * wordSize) << shift);
5884   adds(rscratch2, rscratch1, rscratch2); // At end of page?
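  // The shift above moves ary1's offset-within-page into the top bits of
  // rscratch1, so the adds overflows (carry set) exactly when
  // offset_in_page + 32 >= page_size, i.e. when a 32-byte read could
  // cross into the next (possibly unmapped) page.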
5885 br(CS, STUB); // at the end of page then go to stub 5886 subs(len, len, wordSize); 5887 br(LT, END); 5888 5889 BIND(LOOP); 5890 ldr(rscratch1, Address(post(ary1, wordSize))); 5891 tst(rscratch1, UPPER_BIT_MASK); 5892 br(NE, SET_RESULT); 5893 subs(len, len, wordSize); 5894 br(GE, LOOP); 5895 cmpw(len, -wordSize); 5896 br(EQ, DONE); 5897 5898 BIND(END); 5899 ldr(rscratch1, Address(ary1)); 5900 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5901 lslv(rscratch1, rscratch1, rscratch2); 5902 tst(rscratch1, UPPER_BIT_MASK); 5903 br(NE, SET_RESULT); 5904 b(DONE); 5905 5906 BIND(STUB); 5907 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5908 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5909 address tpc1 = trampoline_call(count_pos); 5910 if (tpc1 == nullptr) { 5911 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5912 postcond(pc() == badAddress); 5913 return nullptr; 5914 } 5915 b(DONE); 5916 5917 BIND(STUB_LONG); 5918 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5919 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5920 address tpc2 = trampoline_call(count_pos_long); 5921 if (tpc2 == nullptr) { 5922 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5923 postcond(pc() == badAddress); 5924 return nullptr; 5925 } 5926 b(DONE); 5927 5928 BIND(SET_RESULT); 5929 5930 add(len, len, wordSize); 5931 sub(result, result, len); 5932 5933 BIND(DONE); 5934 postcond(pc() != badAddress); 5935 return pc(); 5936 } 5937 5938 // Clobbers: rscratch1, rscratch2, rflags 5939 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5940 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5941 Register tmp4, Register tmp5, Register result, 5942 Register cnt1, int elem_size) { 5943 Label DONE, SAME; 5944 Register tmp1 = rscratch1; 5945 Register tmp2 = rscratch2; 5946 int elem_per_word = wordSize/elem_size; 5947 int log_elem_size = exact_log2(elem_size); 5948 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5949 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5950 int base_offset 5951 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5952 // When the length offset is not aligned to 8 bytes, 5953 // then we align it down. This is valid because the new 5954 // offset will always be the klass which is the same 5955 // for type arrays. 5956 int start_offset = align_down(length_offset, BytesPerWord); 5957 int extra_length = base_offset - start_offset; 5958 assert(start_offset == length_offset || start_offset == klass_offset, 5959 "start offset must be 8-byte-aligned or be the klass offset"); 5960 assert(base_offset != start_offset, "must include the length field"); 5961 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5962 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5963 5964 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5965 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5966 5967 #ifndef PRODUCT 5968 { 5969 const char kind = (elem_size == 2) ? 'U' : 'L'; 5970 char comment[64]; 5971 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5972 BLOCK_COMMENT(comment); 5973 } 5974 #endif 5975 5976 // if (a1 == a2) 5977 // return true; 5978 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5979 br(EQ, SAME); 5980 5981 if (UseSimpleArrayEquals) { 5982 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5983 // if (a1 == nullptr || a2 == nullptr) 5984 // return false; 5985 // a1 & a2 == 0 means (some-pointer is null) or 5986 // (very-rare-or-even-probably-impossible-pointer-values) 5987 // so, we can save one branch in most cases 5988 tst(a1, a2); 5989 mov(result, false); 5990 br(EQ, A_MIGHT_BE_NULL); 5991 // if (a1.length != a2.length) 5992 // return false; 5993 bind(A_IS_NOT_NULL); 5994 ldrw(cnt1, Address(a1, length_offset)); 5995 // Increase loop counter by diff between base- and actual start-offset. 5996 addw(cnt1, cnt1, extra_length); 5997 lea(a1, Address(a1, start_offset)); 5998 lea(a2, Address(a2, start_offset)); 5999 // Check for short strings, i.e. smaller than wordSize. 6000 subs(cnt1, cnt1, elem_per_word); 6001 br(Assembler::LT, SHORT); 6002 // Main 8 byte comparison loop. 6003 bind(NEXT_WORD); { 6004 ldr(tmp1, Address(post(a1, wordSize))); 6005 ldr(tmp2, Address(post(a2, wordSize))); 6006 subs(cnt1, cnt1, elem_per_word); 6007 eor(tmp5, tmp1, tmp2); 6008 cbnz(tmp5, DONE); 6009 } br(GT, NEXT_WORD); 6010 // Last longword. In the case where length == 4 we compare the 6011 // same longword twice, but that's still faster than another 6012 // conditional branch. 6013 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 6014 // length == 4. 6015 if (log_elem_size > 0) 6016 lsl(cnt1, cnt1, log_elem_size); 6017 ldr(tmp3, Address(a1, cnt1)); 6018 ldr(tmp4, Address(a2, cnt1)); 6019 eor(tmp5, tmp3, tmp4); 6020 cbnz(tmp5, DONE); 6021 b(SAME); 6022 bind(A_MIGHT_BE_NULL); 6023 // in case both a1 and a2 are not-null, proceed with loads 6024 cbz(a1, DONE); 6025 cbz(a2, DONE); 6026 b(A_IS_NOT_NULL); 6027 bind(SHORT); 6028 6029 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 6030 { 6031 ldrw(tmp1, Address(post(a1, 4))); 6032 ldrw(tmp2, Address(post(a2, 4))); 6033 eorw(tmp5, tmp1, tmp2); 6034 cbnzw(tmp5, DONE); 6035 } 6036 bind(TAIL03); 6037 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 6038 { 6039 ldrh(tmp3, Address(post(a1, 2))); 6040 ldrh(tmp4, Address(post(a2, 2))); 6041 eorw(tmp5, tmp3, tmp4); 6042 cbnzw(tmp5, DONE); 6043 } 6044 bind(TAIL01); 6045 if (elem_size == 1) { // Only needed when comparing byte arrays. 6046 tbz(cnt1, 0, SAME); // 0-1 bytes left. 6047 { 6048 ldrb(tmp1, a1); 6049 ldrb(tmp2, a2); 6050 eorw(tmp5, tmp1, tmp2); 6051 cbnzw(tmp5, DONE); 6052 } 6053 } 6054 } else { 6055 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 6056 CSET_EQ, LAST_CHECK; 6057 mov(result, false); 6058 cbz(a1, DONE); 6059 ldrw(cnt1, Address(a1, length_offset)); 6060 cbz(a2, DONE); 6061 // Increase loop counter by diff between base- and actual start-offset. 
6062 addw(cnt1, cnt1, extra_length); 6063 6064 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's 6065 // faster to perform another branch before comparing a1 and a2 6066 cmp(cnt1, (u1)elem_per_word); 6067 br(LE, SHORT); // short or same 6068 ldr(tmp3, Address(pre(a1, start_offset))); 6069 subs(zr, cnt1, stubBytesThreshold); 6070 br(GE, STUB); 6071 ldr(tmp4, Address(pre(a2, start_offset))); 6072 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 6073 6074 // Main 16 byte comparison loop with 2 exits 6075 bind(NEXT_DWORD); { 6076 ldr(tmp1, Address(pre(a1, wordSize))); 6077 ldr(tmp2, Address(pre(a2, wordSize))); 6078 subs(cnt1, cnt1, 2 * elem_per_word); 6079 br(LE, TAIL); 6080 eor(tmp4, tmp3, tmp4); 6081 cbnz(tmp4, DONE); 6082 ldr(tmp3, Address(pre(a1, wordSize))); 6083 ldr(tmp4, Address(pre(a2, wordSize))); 6084 cmp(cnt1, (u1)elem_per_word); 6085 br(LE, TAIL2); 6086 cmp(tmp1, tmp2); 6087 } br(EQ, NEXT_DWORD); 6088 b(DONE); 6089 6090 bind(TAIL); 6091 eor(tmp4, tmp3, tmp4); 6092 eor(tmp2, tmp1, tmp2); 6093 lslv(tmp2, tmp2, tmp5); 6094 orr(tmp5, tmp4, tmp2); 6095 cmp(tmp5, zr); 6096 b(CSET_EQ); 6097 6098 bind(TAIL2); 6099 eor(tmp2, tmp1, tmp2); 6100 cbnz(tmp2, DONE); 6101 b(LAST_CHECK); 6102 6103 bind(STUB); 6104 ldr(tmp4, Address(pre(a2, start_offset))); 6105 if (elem_size == 2) { // convert to byte counter 6106 lsl(cnt1, cnt1, 1); 6107 } 6108 eor(tmp5, tmp3, tmp4); 6109 cbnz(tmp5, DONE); 6110 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 6111 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 6112 address tpc = trampoline_call(stub); 6113 if (tpc == nullptr) { 6114 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 6115 postcond(pc() == badAddress); 6116 return nullptr; 6117 } 6118 b(DONE); 6119 6120 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 6121 // so, if a2 == null => return false(0), else return true, so we can return a2 6122 mov(result, a2); 6123 b(DONE); 6124 bind(SHORT); 6125 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 6126 ldr(tmp3, Address(a1, start_offset)); 6127 ldr(tmp4, Address(a2, start_offset)); 6128 bind(LAST_CHECK); 6129 eor(tmp4, tmp3, tmp4); 6130 lslv(tmp5, tmp4, tmp5); 6131 cmp(tmp5, zr); 6132 bind(CSET_EQ); 6133 cset(result, EQ); 6134 b(DONE); 6135 } 6136 6137 bind(SAME); 6138 mov(result, true); 6139 // That's it. 6140 bind(DONE); 6141 6142 BLOCK_COMMENT("} array_equals"); 6143 postcond(pc() != badAddress); 6144 return pc(); 6145 } 6146 6147 // Compare Strings 6148 6149 // For Strings we're passed the address of the first characters in a1 6150 // and a2 and the length in cnt1. 6151 // There are two implementations. For arrays >= 8 bytes, all 6152 // comparisons (including the final one, which may overlap) are 6153 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 6154 // halfword, then a short, and then a byte. 6155 6156 void MacroAssembler::string_equals(Register a1, Register a2, 6157 Register result, Register cnt1) 6158 { 6159 Label SAME, DONE, SHORT, NEXT_WORD; 6160 Register tmp1 = rscratch1; 6161 Register tmp2 = rscratch2; 6162 Register cnt2 = tmp2; // cnt2 only used in array length compare 6163 6164 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 6165 6166 #ifndef PRODUCT 6167 { 6168 char comment[64]; 6169 snprintf(comment, sizeof comment, "{string_equalsL"); 6170 BLOCK_COMMENT(comment); 6171 } 6172 #endif 6173 6174 mov(result, false); 6175 6176 // Check for short strings, i.e. smaller than wordSize. 
6177   subs(cnt1, cnt1, wordSize);
6178   br(Assembler::LT, SHORT);
6179   // Main 8 byte comparison loop.
6180   bind(NEXT_WORD); {
6181     ldr(tmp1, Address(post(a1, wordSize)));
6182     ldr(tmp2, Address(post(a2, wordSize)));
6183     subs(cnt1, cnt1, wordSize);
6184     eor(tmp1, tmp1, tmp2);
6185     cbnz(tmp1, DONE);
6186   } br(GT, NEXT_WORD);
6187   // Last longword.  In the case where length == 4 we compare the
6188   // same longword twice, but that's still faster than another
6189   // conditional branch.
6190   // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6191   // length == 4.
6192   ldr(tmp1, Address(a1, cnt1));
6193   ldr(tmp2, Address(a2, cnt1));
6194   eor(tmp2, tmp1, tmp2);
6195   cbnz(tmp2, DONE);
6196   b(SAME);
6197
6198   bind(SHORT);
6199   Label TAIL03, TAIL01;
6200
6201   tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
6202   {
6203     ldrw(tmp1, Address(post(a1, 4)));
6204     ldrw(tmp2, Address(post(a2, 4)));
6205     eorw(tmp1, tmp1, tmp2);
6206     cbnzw(tmp1, DONE);
6207   }
6208   bind(TAIL03);
6209   tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
6210   {
6211     ldrh(tmp1, Address(post(a1, 2)));
6212     ldrh(tmp2, Address(post(a2, 2)));
6213     eorw(tmp1, tmp1, tmp2);
6214     cbnzw(tmp1, DONE);
6215   }
6216   bind(TAIL01);
6217   tbz(cnt1, 0, SAME); // 0-1 bytes left.
6218   {
6219     ldrb(tmp1, a1);
6220     ldrb(tmp2, a2);
6221     eorw(tmp1, tmp1, tmp2);
6222     cbnzw(tmp1, DONE);
6223   }
6224   // Arrays are equal.
6225   bind(SAME);
6226   mov(result, true);
6227
6228   // That's it.
6229   bind(DONE);
6230   BLOCK_COMMENT("} string_equals");
6231 }
6232
6233
6234 // The size of the blocks erased by the zero_blocks stub.  We must
6235 // handle anything smaller than this ourselves in zero_words().
6236 const int MacroAssembler::zero_words_block_size = 8;
6237
6238 // zero_words() is used by C2 ClearArray patterns and by
6239 // C1_MacroAssembler.  It is as small as possible, handling small word
6240 // counts locally and delegating anything larger to the zero_blocks
6241 // stub.  It is expanded many times in compiled code, so it is
6242 // important to keep it short.
6243
6244 // ptr:   Address of a buffer to be zeroed.
6245 // cnt:   Count in HeapWords.
6246 //
6247 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
6248 address MacroAssembler::zero_words(Register ptr, Register cnt)
6249 {
6250   assert(is_power_of_2(zero_words_block_size), "adjust this");
6251
6252   BLOCK_COMMENT("zero_words {");
6253   assert(ptr == r10 && cnt == r11, "mismatch in register usage");
6254   RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
6255   assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
6256
6257   subs(rscratch1, cnt, zero_words_block_size);
6258   Label around;
6259   br(LO, around);
6260   {
6261     // zero_blocks (declared above) lives out of line; reaching it needs
6262     // either a trampoline in this code blob or a far call, chosen below.
6263     // Make sure this is a C2 compilation. C1 allocates space only for
6264     // trampoline stubs generated by Call LIR ops, and in any case it
6265     // makes sense for a C1 compilation task to proceed as quickly as
6266     // possible.
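    // Note: trampoline_call() below can return nullptr if the trampoline
    // stub cannot be allocated (e.g. the code cache is exhausted); the
    // nullptr check then propagates the failure to the caller.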
6267 CompileTask* task; 6268 if (StubRoutines::aarch64::complete() 6269 && Thread::current()->is_Compiler_thread() 6270 && (task = ciEnv::current()->task()) 6271 && is_c2_compile(task->comp_level())) { 6272 address tpc = trampoline_call(zero_blocks); 6273 if (tpc == nullptr) { 6274 DEBUG_ONLY(reset_labels(around)); 6275 return nullptr; 6276 } 6277 } else { 6278 far_call(zero_blocks); 6279 } 6280 } 6281 bind(around); 6282 6283 // We have a few words left to do. zero_blocks has adjusted r10 and r11 6284 // for us. 6285 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 6286 Label l; 6287 tbz(cnt, exact_log2(i), l); 6288 for (int j = 0; j < i; j += 2) { 6289 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 6290 } 6291 bind(l); 6292 } 6293 { 6294 Label l; 6295 tbz(cnt, 0, l); 6296 str(zr, Address(ptr)); 6297 bind(l); 6298 } 6299 6300 BLOCK_COMMENT("} zero_words"); 6301 return pc(); 6302 } 6303 6304 // base: Address of a buffer to be zeroed, 8 bytes aligned. 6305 // cnt: Immediate count in HeapWords. 6306 // 6307 // r10, r11, rscratch1, and rscratch2 are clobbered. 6308 address MacroAssembler::zero_words(Register base, uint64_t cnt) 6309 { 6310 assert(wordSize <= BlockZeroingLowLimit, 6311 "increase BlockZeroingLowLimit"); 6312 address result = nullptr; 6313 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 6314 #ifndef PRODUCT 6315 { 6316 char buf[64]; 6317 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 6318 BLOCK_COMMENT(buf); 6319 } 6320 #endif 6321 if (cnt >= 16) { 6322 uint64_t loops = cnt/16; 6323 if (loops > 1) { 6324 mov(rscratch2, loops - 1); 6325 } 6326 { 6327 Label loop; 6328 bind(loop); 6329 for (int i = 0; i < 16; i += 2) { 6330 stp(zr, zr, Address(base, i * BytesPerWord)); 6331 } 6332 add(base, base, 16 * BytesPerWord); 6333 if (loops > 1) { 6334 subs(rscratch2, rscratch2, 1); 6335 br(GE, loop); 6336 } 6337 } 6338 } 6339 cnt %= 16; 6340 int i = cnt & 1; // store any odd word to start 6341 if (i) str(zr, Address(base)); 6342 for (; i < (int)cnt; i += 2) { 6343 stp(zr, zr, Address(base, i * wordSize)); 6344 } 6345 BLOCK_COMMENT("} zero_words"); 6346 result = pc(); 6347 } else { 6348 mov(r10, base); mov(r11, cnt); 6349 result = zero_words(r10, r11); 6350 } 6351 return result; 6352 } 6353 6354 // Zero blocks of memory by using DC ZVA. 6355 // 6356 // Aligns the base address first sufficiently for DC ZVA, then uses 6357 // DC ZVA repeatedly for every full block. cnt is the size to be 6358 // zeroed in HeapWords. Returns the count of words left to be zeroed 6359 // in cnt. 6360 // 6361 // NOTE: This is intended to be used in the zero_blocks() stub. If 6362 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 6363 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 6364 Register tmp = rscratch1; 6365 Register tmp2 = rscratch2; 6366 int zva_length = VM_Version::zva_length(); 6367 Label initial_table_end, loop_zva; 6368 Label fini; 6369 6370 // Base must be 16 byte aligned. If not just return and let caller handle it 6371 tst(base, 0x0f); 6372 br(Assembler::NE, fini); 6373 // Align base with ZVA length. 6374 neg(tmp, base); 6375 andr(tmp, tmp, zva_length - 1); 6376 6377 // tmp: the number of bytes to be filled to align the base with ZVA length. 
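  // The adr/sub/br below form a computed branch into the ladder of stp
  // instructions that follows: each stp zeroes 16 bytes and occupies 4
  // bytes of code, so backing up tmp >> 2 bytes (i.e. (tmp / 16) * 4)
  // from initial_table_end executes exactly enough stores to fill the
  // alignment gap.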
6378 add(base, base, tmp); 6379 sub(cnt, cnt, tmp, Assembler::ASR, 3); 6380 adr(tmp2, initial_table_end); 6381 sub(tmp2, tmp2, tmp, Assembler::LSR, 2); 6382 br(tmp2); 6383 6384 for (int i = -zva_length + 16; i < 0; i += 16) 6385 stp(zr, zr, Address(base, i)); 6386 bind(initial_table_end); 6387 6388 sub(cnt, cnt, zva_length >> 3); 6389 bind(loop_zva); 6390 dc(Assembler::ZVA, base); 6391 subs(cnt, cnt, zva_length >> 3); 6392 add(base, base, zva_length); 6393 br(Assembler::GE, loop_zva); 6394 add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA 6395 bind(fini); 6396 } 6397 6398 // base: Address of a buffer to be filled, 8 bytes aligned. 6399 // cnt: Count in 8-byte unit. 6400 // value: Value to be filled with. 6401 // base will point to the end of the buffer after filling. 6402 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 6403 { 6404 // Algorithm: 6405 // 6406 // if (cnt == 0) { 6407 // return; 6408 // } 6409 // if ((p & 8) != 0) { 6410 // *p++ = v; 6411 // } 6412 // 6413 // scratch1 = cnt & 14; 6414 // cnt -= scratch1; 6415 // p += scratch1; 6416 // switch (scratch1 / 2) { 6417 // do { 6418 // cnt -= 16; 6419 // p[-16] = v; 6420 // p[-15] = v; 6421 // case 7: 6422 // p[-14] = v; 6423 // p[-13] = v; 6424 // case 6: 6425 // p[-12] = v; 6426 // p[-11] = v; 6427 // // ... 6428 // case 1: 6429 // p[-2] = v; 6430 // p[-1] = v; 6431 // case 0: 6432 // p += 16; 6433 // } while (cnt); 6434 // } 6435 // if ((cnt & 1) == 1) { 6436 // *p++ = v; 6437 // } 6438 6439 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 6440 6441 Label fini, skip, entry, loop; 6442 const int unroll = 8; // Number of stp instructions we'll unroll 6443 6444 cbz(cnt, fini); 6445 tbz(base, 3, skip); 6446 str(value, Address(post(base, 8))); 6447 sub(cnt, cnt, 1); 6448 bind(skip); 6449 6450 andr(rscratch1, cnt, (unroll-1) * 2); 6451 sub(cnt, cnt, rscratch1); 6452 add(base, base, rscratch1, Assembler::LSL, 3); 6453 adr(rscratch2, entry); 6454 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 6455 br(rscratch2); 6456 6457 bind(loop); 6458 add(base, base, unroll * 16); 6459 for (int i = -unroll; i < 0; i++) 6460 stp(value, value, Address(base, i * 16)); 6461 bind(entry); 6462 subs(cnt, cnt, unroll * 2); 6463 br(Assembler::GE, loop); 6464 6465 tbz(cnt, 0, fini); 6466 str(value, Address(post(base, 8))); 6467 bind(fini); 6468 } 6469 6470 // Intrinsic for 6471 // 6472 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray 6473 // return the number of characters copied. 6474 // - java/lang/StringUTF16.compress 6475 // return index of non-latin1 character if copy fails, otherwise 'len'. 6476 // 6477 // This version always returns the number of characters copied, and does not 6478 // clobber the 'len' register. A successful copy will complete with the post- 6479 // condition: 'res' == 'len', while an unsuccessful copy will exit with the 6480 // post-condition: 0 <= 'res' < 'len'. 6481 // 6482 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) has proven to 6483 // degrade performance (on Ampere Altra - Neoverse N1), to an extent 6484 // beyond the acceptable, even though the footprint would be smaller. 6485 // Using 'umaxv' in the ASCII-case comes with a small penalty but does 6486 // avoid additional bloat. 
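//
// The wide loop below checks both encodings on de-interleaved halves
// (a sketch of the idea):
//
//   lo = uzp1(a, b)   // even bytes: the latin-1/ASCII payload
//   hi = uzp2(a, b)   // odd bytes:  must be all zero for ISO
//
// and for ASCII additionally requires that no payload byte has the sign
// bit set, which is what the cm(LT)/umaxv steps establish.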
6487 // 6488 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6489 void MacroAssembler::encode_iso_array(Register src, Register dst, 6490 Register len, Register res, bool ascii, 6491 FloatRegister vtmp0, FloatRegister vtmp1, 6492 FloatRegister vtmp2, FloatRegister vtmp3, 6493 FloatRegister vtmp4, FloatRegister vtmp5) 6494 { 6495 Register cnt = res; 6496 Register max = rscratch1; 6497 Register chk = rscratch2; 6498 6499 prfm(Address(src), PLDL1STRM); 6500 movw(cnt, len); 6501 6502 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6503 6504 Label LOOP_32, DONE_32, FAIL_32; 6505 6506 BIND(LOOP_32); 6507 { 6508 cmpw(cnt, 32); 6509 br(LT, DONE_32); 6510 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6511 // Extract lower bytes. 6512 FloatRegister vlo0 = vtmp4; 6513 FloatRegister vlo1 = vtmp5; 6514 uzp1(vlo0, T16B, vtmp0, vtmp1); 6515 uzp1(vlo1, T16B, vtmp2, vtmp3); 6516 // Merge bits... 6517 orr(vtmp0, T16B, vtmp0, vtmp1); 6518 orr(vtmp2, T16B, vtmp2, vtmp3); 6519 // Extract merged upper bytes. 6520 FloatRegister vhix = vtmp0; 6521 uzp2(vhix, T16B, vtmp0, vtmp2); 6522 // ISO-check on hi-parts (all zero). 6523 // ASCII-check on lo-parts (no sign). 6524 FloatRegister vlox = vtmp1; // Merge lower bytes. 6525 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6526 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6527 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6528 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6529 ASCII(orr(chk, chk, max)); 6530 cbnz(chk, FAIL_32); 6531 subw(cnt, cnt, 32); 6532 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6533 b(LOOP_32); 6534 } 6535 BIND(FAIL_32); 6536 sub(src, src, 64); 6537 BIND(DONE_32); 6538 6539 Label LOOP_8, SKIP_8; 6540 6541 BIND(LOOP_8); 6542 { 6543 cmpw(cnt, 8); 6544 br(LT, SKIP_8); 6545 FloatRegister vhi = vtmp0; 6546 FloatRegister vlo = vtmp1; 6547 ld1(vtmp3, T8H, src); 6548 uzp1(vlo, T16B, vtmp3, vtmp3); 6549 uzp2(vhi, T16B, vtmp3, vtmp3); 6550 // ISO-check on hi-parts (all zero). 6551 // ASCII-check on lo-parts (no sign). 6552 ASCII(cm(LT, vtmp2, T16B, vlo)); 6553 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6554 ASCII(umov(max, vtmp2, B, 0)); 6555 ASCII(orr(chk, chk, max)); 6556 cbnz(chk, SKIP_8); 6557 6558 strd(vlo, Address(post(dst, 8))); 6559 subw(cnt, cnt, 8); 6560 add(src, src, 16); 6561 b(LOOP_8); 6562 } 6563 BIND(SKIP_8); 6564 6565 #undef ASCII 6566 6567 Label LOOP, DONE; 6568 6569 cbz(cnt, DONE); 6570 BIND(LOOP); 6571 { 6572 Register chr = rscratch1; 6573 ldrh(chr, Address(post(src, 2))); 6574 tst(chr, ascii ? 0xff80 : 0xff00); 6575 br(NE, DONE); 6576 strb(chr, Address(post(dst, 1))); 6577 subs(cnt, cnt, 1); 6578 br(GT, LOOP); 6579 } 6580 BIND(DONE); 6581 // Return index where we stopped. 6582 subw(res, len, cnt); 6583 } 6584 6585 // Inflate byte[] array to char[]. 6586 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6587 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6588 FloatRegister vtmp1, FloatRegister vtmp2, 6589 FloatRegister vtmp3, Register tmp4) { 6590 Label big, done, after_init, to_stub; 6591 6592 assert_different_registers(src, dst, len, tmp4, rscratch1); 6593 6594 fmovd(vtmp1, 0.0); 6595 lsrw(tmp4, len, 3); 6596 bind(after_init); 6597 cbnzw(tmp4, big); 6598 // Short string: less than 8 bytes. 6599 { 6600 Label loop, tiny; 6601 6602 cmpw(len, 4); 6603 br(LT, tiny); 6604 // Use SIMD to do 4 bytes. 
6605 ldrs(vtmp2, post(src, 4)); 6606 zip1(vtmp3, T8B, vtmp2, vtmp1); 6607 subw(len, len, 4); 6608 strd(vtmp3, post(dst, 8)); 6609 6610 cbzw(len, done); 6611 6612 // Do the remaining bytes by steam. 6613 bind(loop); 6614 ldrb(tmp4, post(src, 1)); 6615 strh(tmp4, post(dst, 2)); 6616 subw(len, len, 1); 6617 6618 bind(tiny); 6619 cbnz(len, loop); 6620 6621 b(done); 6622 } 6623 6624 if (SoftwarePrefetchHintDistance >= 0) { 6625 bind(to_stub); 6626 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); 6627 assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); 6628 address tpc = trampoline_call(stub); 6629 if (tpc == nullptr) { 6630 DEBUG_ONLY(reset_labels(big, done)); 6631 postcond(pc() == badAddress); 6632 return nullptr; 6633 } 6634 b(after_init); 6635 } 6636 6637 // Unpack the bytes 8 at a time. 6638 bind(big); 6639 { 6640 Label loop, around, loop_last, loop_start; 6641 6642 if (SoftwarePrefetchHintDistance >= 0) { 6643 const int large_loop_threshold = (64 + 16)/8; 6644 ldrd(vtmp2, post(src, 8)); 6645 andw(len, len, 7); 6646 cmp(tmp4, (u1)large_loop_threshold); 6647 br(GE, to_stub); 6648 b(loop_start); 6649 6650 bind(loop); 6651 ldrd(vtmp2, post(src, 8)); 6652 bind(loop_start); 6653 subs(tmp4, tmp4, 1); 6654 br(EQ, loop_last); 6655 zip1(vtmp2, T16B, vtmp2, vtmp1); 6656 ldrd(vtmp3, post(src, 8)); 6657 st1(vtmp2, T8H, post(dst, 16)); 6658 subs(tmp4, tmp4, 1); 6659 zip1(vtmp3, T16B, vtmp3, vtmp1); 6660 st1(vtmp3, T8H, post(dst, 16)); 6661 br(NE, loop); 6662 b(around); 6663 bind(loop_last); 6664 zip1(vtmp2, T16B, vtmp2, vtmp1); 6665 st1(vtmp2, T8H, post(dst, 16)); 6666 bind(around); 6667 cbz(len, done); 6668 } else { 6669 andw(len, len, 7); 6670 bind(loop); 6671 ldrd(vtmp2, post(src, 8)); 6672 sub(tmp4, tmp4, 1); 6673 zip1(vtmp3, T16B, vtmp2, vtmp1); 6674 st1(vtmp3, T8H, post(dst, 16)); 6675 cbnz(tmp4, loop); 6676 } 6677 } 6678 6679 // Do the tail of up to 8 bytes. 6680 add(src, src, len); 6681 ldrd(vtmp3, Address(src, -8)); 6682 add(dst, dst, len, ext::uxtw, 1); 6683 zip1(vtmp3, T16B, vtmp3, vtmp1); 6684 strq(vtmp3, Address(dst, -16)); 6685 6686 bind(done); 6687 postcond(pc() != badAddress); 6688 return pc(); 6689 } 6690 6691 // Compress char[] array to byte[]. 6692 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 6693 // Return the array length if every element in array can be encoded, 6694 // otherwise, the index of first non-latin1 (> 0xff) character. 6695 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 6696 Register res, 6697 FloatRegister tmp0, FloatRegister tmp1, 6698 FloatRegister tmp2, FloatRegister tmp3, 6699 FloatRegister tmp4, FloatRegister tmp5) { 6700 encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 6701 } 6702 6703 // java.math.round(double a) 6704 // Returns the closest long to the argument, with ties rounding to 6705 // positive infinity. This requires some fiddling for corner 6706 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5). 6707 void MacroAssembler::java_round_double(Register dst, FloatRegister src, 6708 FloatRegister ftmp) { 6709 Label DONE; 6710 BLOCK_COMMENT("java_round_double: { "); 6711 fmovd(rscratch1, src); 6712 // Use RoundToNearestTiesAway unless src small and -ve. 
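  // Rationale (sketch): fcvtas rounds ties away from zero, which agrees
  // with Math.round (ties toward +infinity) for all non-negative inputs,
  // and values with |src| >= 2^52 have no fractional part anyway. Only
  // small negative ties differ, e.g. Math.round(-2.5) == -2 while
  // ties-away gives -3; those take the add-0.5-then-floor path below.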
6713 fcvtasd(dst, src);
6714 // Test if src >= 0 || abs(src) >= 0x1.0p52
6715 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
6716 mov(rscratch2, julong_cast(0x1.0p52));
6717 cmp(rscratch1, rscratch2);
6718 br(HS, DONE); {
6719 // src < 0 && abs(src) < 0x1.0p52
6720 // src may have a fractional part, so add 0.5
6721 fmovd(ftmp, 0.5);
6722 faddd(ftmp, src, ftmp);
6723 // Convert double to jlong, use RoundTowardsNegative
6724 fcvtmsd(dst, ftmp);
6725 }
6726 bind(DONE);
6727 BLOCK_COMMENT("} java_round_double");
6728 }
6729
6730 void MacroAssembler::java_round_float(Register dst, FloatRegister src,
6731 FloatRegister ftmp) {
6732 Label DONE;
6733 BLOCK_COMMENT("java_round_float: { ");
6734 fmovs(rscratch1, src);
6735 // Use RoundToNearestTiesAway unless src is small and negative.
6736 fcvtassw(dst, src);
6737 // Test if src >= 0 || abs(src) >= 0x1.0p23
6738 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
6739 mov(rscratch2, jint_cast(0x1.0p23f));
6740 cmp(rscratch1, rscratch2);
6741 br(HS, DONE); {
6742 // src < 0 && |src| < 0x1.0p23
6743 // src may have a fractional part, so add 0.5
6744 fmovs(ftmp, 0.5f);
6745 fadds(ftmp, src, ftmp);
6746 // Convert float to jint, use RoundTowardsNegative
6747 fcvtmssw(dst, ftmp);
6748 }
6749 bind(DONE);
6750 BLOCK_COMMENT("} java_round_float");
6751 }
6752
6753 // get_thread() can be called anywhere inside generated code so we
6754 // need to save whatever non-callee-save context might get clobbered
6755 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
6756 // the call setup code.
6757 //
6758 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
6759 // On other systems, the helper is an ordinary C function, so all caller-saved registers must be preserved.
6760 //
6761 void MacroAssembler::get_thread(Register dst) {
6762 RegSet saved_regs =
6763 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
6764 NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
6765
6766 protect_return_address();
6767 push(saved_regs, sp);
6768
6769 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
6770 blr(lr);
6771 if (dst != c_rarg0) {
6772 mov(dst, c_rarg0);
6773 }
6774
6775 pop(saved_regs, sp);
6776 authenticate_return_address();
6777 }
6778
6779 #ifdef COMPILER2
6780 // C2 compiled method's prolog code
6781 // Moved here from aarch64.ad to support the Valhalla code below
6782 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
6783 if (C->clinit_barrier_on_entry()) {
6784 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
6785
6786 Label L_skip_barrier;
6787
6788 mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
6789 clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
6790 far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
6791 bind(L_skip_barrier);
6792 }
6793
6794 if (C->max_vector_size() > 0) {
6795 reinitialize_ptrue();
6796 }
6797
6798 int bangsize = C->output()->bang_size_in_bytes();
6799 if (C->output()->need_stack_bang(bangsize))
6800 generate_stack_overflow_check(bangsize);
6801
6802 // n.b. frame size includes space for return pc and rfp
6803 const long framesize = C->output()->frame_size_in_bytes();
6804 build_frame(framesize);
6805
6806 if (C->needs_stack_repair()) {
6807 save_stack_increment(sp_inc, framesize);
6808 }
6809
6810 if (VerifyStackAtCalls) {
6811 Unimplemented();
6812 }
6813 }
6814 #endif // COMPILER2
6815
6816 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
6817 assert(InlineTypeReturnedAsFields, "inline types should only reach here when returned as fields");
6818 // An inline type might be returned. If fields are in registers we
6819 // need to allocate an inline type instance and initialize it with
6820 // the value of the fields.
6821 Label skip;
6822 // We only need a new buffered inline type if one is not returned already
6823 tbz(r0, 0, skip);
6824 int call_offset = -1;
6825
6826 // Be careful not to clobber r1-r7, which hold the returned fields.
6827 // Also do not use callee-saved registers as these may be live in the interpreter
6828 Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
6829
6830 // The following code is similar to allocate_instance but has some slight differences,
6831 // e.g. the object size is never zero and is sometimes constant; storing the klass pointer
6832 // after allocating is unnecessary if vk != nullptr. allocate_instance is not aware of these differences.
6833 Label slow_case;
6834 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
6835 mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it when allocation failed
6836
6837 if (vk != nullptr) {
6838 // Called from C1, where the return type is statically known.
6839 movptr(klass, (intptr_t)vk->get_InlineKlass());
6840 jint lh = vk->layout_helper();
6841 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
6842 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
6843 tlab_allocate(r0, noreg, lh, tmp1, tmp2, slow_case);
6844 } else {
6845 b(slow_case);
6846 }
6847 } else {
6848 // Called from the interpreter. R0 contains ((the InlineKlass* of the return type) | 0x01)
6849 andr(klass, r0, -2);
6850 if (UseTLAB) {
6851 ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
6852 tst(tmp2, Klass::_lh_instance_slow_path_bit);
6853 br(Assembler::NE, slow_case);
6854 tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
6855 } else {
6856 b(slow_case);
6857 }
6858 }
6859 if (UseTLAB) {
6860 // 2. Initialize the buffered inline instance header
6861 Register buffer_obj = r0;
6862 mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
6863 str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
6864 store_klass_gap(buffer_obj, zr);
6865 if (vk == nullptr) {
6866 // store_klass corrupts klass, so save it for later use (interpreter case only).
6867 mov(tmp1, klass);
6868 }
6869 store_klass(buffer_obj, klass);
6870 // 3. Initialize its fields with an inline class specific handler
6871 if (vk != nullptr) {
6872 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6873 } else {
6874 // tmp1 holds the klass preserved above
6875 ldr(tmp1, Address(tmp1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
6876 ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
6877 blr(tmp1);
6878 }
6879
6880 membar(Assembler::StoreStore);
6881 b(skip);
6882 } else {
6883 // Must have already branched to slow_case above.
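// When UseTLAB is false, both the C1 (vk != nullptr) and interpreter
// paths above emit an unconditional branch to slow_case, so this arm
// can never be reached; the debug-only trap below enforces that.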
6884 DEBUG_ONLY(should_not_reach_here()); 6885 } 6886 bind(slow_case); 6887 // We failed to allocate a new inline type, fall back to a runtime 6888 // call. Some oop field may be live in some registers but we can't 6889 // tell. That runtime call will take care of preserving them 6890 // across a GC if there's one. 6891 mov(r0, r0_preserved); 6892 6893 if (from_interpreter) { 6894 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf()); 6895 } else { 6896 far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf())); 6897 call_offset = offset(); 6898 } 6899 membar(Assembler::StoreStore); 6900 6901 bind(skip); 6902 return call_offset; 6903 } 6904 6905 // Move a value between registers/stack slots and update the reg_state 6906 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) { 6907 assert(from->is_valid() && to->is_valid(), "source and destination must be valid"); 6908 if (reg_state[to->value()] == reg_written) { 6909 return true; // Already written 6910 } 6911 6912 if (from != to && bt != T_VOID) { 6913 if (reg_state[to->value()] == reg_readonly) { 6914 return false; // Not yet writable 6915 } 6916 if (from->is_reg()) { 6917 if (to->is_reg()) { 6918 if (from->is_Register() && to->is_Register()) { 6919 mov(to->as_Register(), from->as_Register()); 6920 } else if (from->is_FloatRegister() && to->is_FloatRegister()) { 6921 fmovd(to->as_FloatRegister(), from->as_FloatRegister()); 6922 } else { 6923 ShouldNotReachHere(); 6924 } 6925 } else { 6926 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size; 6927 Address to_addr = Address(sp, st_off); 6928 if (from->is_FloatRegister()) { 6929 if (bt == T_DOUBLE) { 6930 strd(from->as_FloatRegister(), to_addr); 6931 } else { 6932 assert(bt == T_FLOAT, "must be float"); 6933 strs(from->as_FloatRegister(), to_addr); 6934 } 6935 } else { 6936 str(from->as_Register(), to_addr); 6937 } 6938 } 6939 } else { 6940 Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size); 6941 if (to->is_reg()) { 6942 if (to->is_FloatRegister()) { 6943 if (bt == T_DOUBLE) { 6944 ldrd(to->as_FloatRegister(), from_addr); 6945 } else { 6946 assert(bt == T_FLOAT, "must be float"); 6947 ldrs(to->as_FloatRegister(), from_addr); 6948 } 6949 } else { 6950 ldr(to->as_Register(), from_addr); 6951 } 6952 } else { 6953 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size; 6954 ldr(rscratch1, from_addr); 6955 str(rscratch1, Address(sp, st_off)); 6956 } 6957 } 6958 } 6959 6960 // Update register states 6961 reg_state[from->value()] = reg_writable; 6962 reg_state[to->value()] = reg_written; 6963 return true; 6964 } 6965 6966 // Calculate the extra stack space required for packing or unpacking inline 6967 // args and adjust the stack pointer 6968 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) { 6969 int sp_inc = args_on_stack * VMRegImpl::stack_slot_size; 6970 sp_inc = align_up(sp_inc, StackAlignmentInBytes); 6971 assert(sp_inc > 0, "sanity"); 6972 6973 // Save a copy of the FP and LR here for deoptimization patching and frame walking 6974 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 6975 6976 // Adjust the stack pointer. 
This will be repaired on return by MacroAssembler::remove_frame.
6977 if (sp_inc < (1 << 9)) {
6978 sub(sp, sp, sp_inc); // Fits in an immediate
6979 } else {
6980 mov(rscratch1, sp_inc);
6981 sub(sp, sp, rscratch1);
6982 }
6983
6984 return sp_inc + 2 * wordSize; // Account for the FP/LR space
6985 }
6986
6987 // Read all fields from an inline type oop and store the values in registers/stack slots
6988 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
6989 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
6990 RegState reg_state[]) {
6991 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6992 assert(from->is_valid(), "source must be valid");
6993 bool progress = false;
6994 #ifdef ASSERT
6995 const int start_offset = offset();
6996 #endif
6997
6998 Label L_null, L_notNull;
6999 // Don't use r14 as a tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
7000 Register tmp1 = r10;
7001 Register tmp2 = r11;
7002 Register fromReg = noreg;
7003 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
7004 bool done = true;
7005 bool mark_done = true;
7006 VMReg toReg;
7007 BasicType bt;
7008 // Check if the argument requires a null check
7009 bool null_check = false;
7010 VMReg nullCheckReg;
7011 while (stream.next(nullCheckReg, bt)) {
7012 if (sig->at(stream.sig_index())._offset == -1) {
7013 null_check = true;
7014 break;
7015 }
7016 }
7017 stream.reset(sig_index, to_index);
7018 while (stream.next(toReg, bt)) {
7019 assert(toReg->is_valid(), "destination must be valid");
7020 int idx = (int)toReg->value();
7021 if (reg_state[idx] == reg_readonly) {
7022 if (idx != from->value()) {
7023 mark_done = false;
7024 }
7025 done = false;
7026 continue;
7027 } else if (reg_state[idx] == reg_written) {
7028 continue;
7029 }
7030 assert(reg_state[idx] == reg_writable, "must be writable");
7031 reg_state[idx] = reg_written;
7032 progress = true;
7033
7034 if (fromReg == noreg) {
7035 if (from->is_reg()) {
7036 fromReg = from->as_Register();
7037 } else {
7038 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
7039 ldr(tmp1, Address(sp, st_off));
7040 fromReg = tmp1;
7041 }
7042 if (null_check) {
7043 // Nullable inline type argument, emit null check
7044 cbz(fromReg, L_null);
7045 }
7046 }
7047 int off = sig->at(stream.sig_index())._offset;
7048 if (off == -1) {
7049 assert(null_check, "missing null check");
7050 if (toReg->is_stack()) {
7051 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7052 mov(tmp2, 1);
7053 str(tmp2, Address(sp, st_off));
7054 } else {
7055 mov(toReg->as_Register(), 1);
7056 }
7057 continue;
7058 }
7059 assert(off > 0, "offset in object should be positive");
7060 Address fromAddr = Address(fromReg, off);
7061 if (!toReg->is_FloatRegister()) {
7062 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
7063 if (is_reference_type(bt)) {
7064 load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
7065 } else {
7066 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
7067 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
7068 }
7069 if (toReg->is_stack()) {
7070 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7071 str(dst, Address(sp, st_off));
7072 }
7073 } else if (bt == T_DOUBLE) {
7074 ldrd(toReg->as_FloatRegister(), fromAddr);
7075 } else {
7076 assert(bt == T_FLOAT, "must be float");
7077 ldrs(toReg->as_FloatRegister(), fromAddr);
7078 }
7079 }
7080 if (progress && null_check) {
7081 if (done) {
7082 b(L_notNull);
7083 bind(L_null);
7084 // Set the IsInit field to zero to signal that the argument is null.
7085 // Also set all oop fields to zero to make the GC happy.
7086 stream.reset(sig_index, to_index);
7087 while (stream.next(toReg, bt)) {
7088 if (sig->at(stream.sig_index())._offset == -1 ||
7089 bt == T_OBJECT || bt == T_ARRAY) {
7090 if (toReg->is_stack()) {
7091 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7092 str(zr, Address(sp, st_off));
7093 } else {
7094 mov(toReg->as_Register(), zr);
7095 }
7096 }
7097 }
7098 bind(L_notNull);
7099 } else {
7100 bind(L_null);
7101 }
7102 }
7103
7104 sig_index = stream.sig_index();
7105 to_index = stream.regs_index();
7106
7107 if (mark_done && reg_state[from->value()] != reg_written) {
7108 // This is okay because no one else will write to that slot
7109 reg_state[from->value()] = reg_writable;
7110 }
7111 from_index--;
7112 assert(progress || (start_offset == offset()), "should not emit code");
7113 return done;
7114 }
7115
7116 // Pack fields back into an inline type oop
7117 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
7118 VMRegPair* from, int from_count, int& from_index, VMReg to,
7119 RegState reg_state[], Register val_array) {
7120 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
7121 assert(to->is_valid(), "destination must be valid");
7122
7123 if (reg_state[to->value()] == reg_written) {
7124 skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7125 return true; // Already written
7126 }
7127
7128 // The GC barrier expanded by store_heap_oop below may call into the
7129 // runtime so use callee-saved registers for any values that need to be
7130 // preserved. The GC barrier assembler should take care of saving the
7131 // Java argument registers.
7132 // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it holds a spilled value?
7133 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
7134 Register val_obj_tmp = r21;
7135 Register from_reg_tmp = r22;
7136 Register tmp1 = r14;
7137 Register tmp2 = r13;
7138 Register tmp3 = r12;
7139 Register val_obj = to->is_stack() ?
val_obj_tmp : to->as_Register(); 7140 7141 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array); 7142 7143 if (reg_state[to->value()] == reg_readonly) { 7144 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) { 7145 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 7146 return false; // Not yet writable 7147 } 7148 val_obj = val_obj_tmp; 7149 } 7150 7151 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT); 7152 load_heap_oop(val_obj, Address(val_array, index), tmp1, tmp2); 7153 7154 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index); 7155 VMReg fromReg; 7156 BasicType bt; 7157 Label L_null; 7158 while (stream.next(fromReg, bt)) { 7159 assert(fromReg->is_valid(), "source must be valid"); 7160 reg_state[fromReg->value()] = reg_writable; 7161 7162 int off = sig->at(stream.sig_index())._offset; 7163 if (off == -1) { 7164 // Nullable inline type argument, emit null check 7165 Label L_notNull; 7166 if (fromReg->is_stack()) { 7167 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size; 7168 ldrb(tmp2, Address(sp, ld_off)); 7169 cbnz(tmp2, L_notNull); 7170 } else { 7171 cbnz(fromReg->as_Register(), L_notNull); 7172 } 7173 mov(val_obj, 0); 7174 b(L_null); 7175 bind(L_notNull); 7176 continue; 7177 } 7178 7179 assert(off > 0, "offset in object should be positive"); 7180 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; 7181 7182 // Pack the scalarized field into the value object. 7183 Address dst(val_obj, off); 7184 7185 if (!fromReg->is_FloatRegister()) { 7186 Register src; 7187 if (fromReg->is_stack()) { 7188 src = from_reg_tmp; 7189 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size; 7190 load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false); 7191 } else { 7192 src = fromReg->as_Register(); 7193 } 7194 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array); 7195 if (is_reference_type(bt)) { 7196 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED); 7197 } else { 7198 store_sized_value(dst, src, size_in_bytes); 7199 } 7200 } else if (bt == T_DOUBLE) { 7201 strd(fromReg->as_FloatRegister(), dst); 7202 } else { 7203 assert(bt == T_FLOAT, "must be float"); 7204 strs(fromReg->as_FloatRegister(), dst); 7205 } 7206 } 7207 bind(L_null); 7208 sig_index = stream.sig_index(); 7209 from_index = stream.regs_index(); 7210 7211 assert(reg_state[to->value()] == reg_writable, "must have already been read"); 7212 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state); 7213 assert(success, "to register must be writeable"); 7214 7215 return true; 7216 } 7217 7218 VMReg MacroAssembler::spill_reg_for(VMReg reg) { 7219 return (reg->is_FloatRegister()) ? 
v8->as_VMReg() : r14->as_VMReg(); 7220 } 7221 7222 void MacroAssembler::cache_wb(Address line) { 7223 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 7224 assert(line.index() == noreg, "index should be noreg"); 7225 assert(line.offset() == 0, "offset should be 0"); 7226 // would like to assert this 7227 // assert(line._ext.shift == 0, "shift should be zero"); 7228 if (VM_Version::supports_dcpop()) { 7229 // writeback using clear virtual address to point of persistence 7230 dc(Assembler::CVAP, line.base()); 7231 } else { 7232 // no need to generate anything as Unsafe.writebackMemory should 7233 // never invoke this stub 7234 } 7235 } 7236 7237 void MacroAssembler::cache_wbsync(bool is_pre) { 7238 // we only need a barrier post sync 7239 if (!is_pre) { 7240 membar(Assembler::AnyAny); 7241 } 7242 } 7243 7244 void MacroAssembler::verify_sve_vector_length(Register tmp) { 7245 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) { 7246 return; 7247 } 7248 // Make sure that native code does not change SVE vector length. 7249 Label verify_ok; 7250 movw(tmp, zr); 7251 sve_inc(tmp, B); 7252 subsw(zr, tmp, VM_Version::get_initial_sve_vector_length()); 7253 br(EQ, verify_ok); 7254 stop("Error: SVE vector length has changed since jvm startup"); 7255 bind(verify_ok); 7256 } 7257 7258 void MacroAssembler::verify_ptrue() { 7259 Label verify_ok; 7260 if (!UseSVE) { 7261 return; 7262 } 7263 sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count. 7264 sve_dec(rscratch1, B); 7265 cbz(rscratch1, verify_ok); 7266 stop("Error: the preserved predicate register (p7) elements are not all true"); 7267 bind(verify_ok); 7268 } 7269 7270 void MacroAssembler::safepoint_isb() { 7271 isb(); 7272 #ifndef PRODUCT 7273 if (VerifyCrossModifyFence) { 7274 // Clear the thread state. 7275 strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 7276 } 7277 #endif 7278 } 7279 7280 #ifndef PRODUCT 7281 void MacroAssembler::verify_cross_modify_fence_not_required() { 7282 if (VerifyCrossModifyFence) { 7283 // Check if thread needs a cross modify fence. 7284 ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset()))); 7285 Label fence_not_required; 7286 cbz(rscratch1, fence_not_required); 7287 // If it does then fail. 7288 lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure))); 7289 mov(c_rarg0, rthread); 7290 blr(rscratch1); 7291 bind(fence_not_required); 7292 } 7293 } 7294 #endif 7295 7296 void MacroAssembler::spin_wait() { 7297 for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) { 7298 switch (VM_Version::spin_wait_desc().inst()) { 7299 case SpinWait::NOP: 7300 nop(); 7301 break; 7302 case SpinWait::ISB: 7303 isb(); 7304 break; 7305 case SpinWait::YIELD: 7306 yield(); 7307 break; 7308 default: 7309 ShouldNotReachHere(); 7310 } 7311 } 7312 } 7313 7314 // Stack frame creation/removal 7315 7316 void MacroAssembler::enter(bool strip_ret_addr) { 7317 if (strip_ret_addr) { 7318 // Addresses can only be signed once. If there are multiple nested frames being created 7319 // in the same function, then the return address needs stripping first. 
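// In outline (a sketch): xpaclri strips the old PAC bits from lr here,
// after which protect_return_address() below re-signs lr (paciaz, zero
// modifier) and the frame is pushed:
//   stp rfp, lr, [sp, #-16]!   ; save the caller's rfp and the signed lr
//   mov rfp, sp                ; establish the new frame pointer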
7320 strip_return_address();
7321 }
7322 protect_return_address();
7323 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7324 mov(rfp, sp);
7325 }
7326
7327 void MacroAssembler::leave() {
7328 mov(sp, rfp);
7329 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
7330 authenticate_return_address();
7331 }
7332
7333 // ROP Protection
7334 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
7335 // destroying stack frames or whenever directly loading/storing the LR to memory.
7336 // If ROP protection is not set then these functions are no-ops.
7337 // For more details on PAC see pauth_aarch64.hpp.
7338
7339 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
7340 // Uses value zero as the modifier.
7341 //
7342 void MacroAssembler::protect_return_address() {
7343 if (VM_Version::use_rop_protection()) {
7344 check_return_address();
7345 paciaz();
7346 }
7347 }
7348
7349 // Sign the return address held in the given register. Use before updating the LR in the existing
7350 // stack frame for the current function.
7351 // Uses value zero as the modifier.
7352 //
7353 void MacroAssembler::protect_return_address(Register return_reg) {
7354 if (VM_Version::use_rop_protection()) {
7355 check_return_address(return_reg);
7356 paciza(return_reg);
7357 }
7358 }
7359
7360 // Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
7361 // Uses value zero as the modifier.
7362 //
7363 void MacroAssembler::authenticate_return_address() {
7364 if (VM_Version::use_rop_protection()) {
7365 autiaz();
7366 check_return_address();
7367 }
7368 }
7369
7370 // Authenticate the return address held in the given register. Use before updating the LR in the
7371 // existing stack frame for the current function.
7372 // Uses value zero as the modifier.
7373 //
7374 void MacroAssembler::authenticate_return_address(Register return_reg) {
7375 if (VM_Version::use_rop_protection()) {
7376 autiza(return_reg);
7377 check_return_address(return_reg);
7378 }
7379 }
7380
7381 // Strip any PAC data from the LR without performing any authentication. Use with caution: only
7382 // when there is no guaranteed way of authenticating the LR.
7383 //
7384 void MacroAssembler::strip_return_address() {
7385 if (VM_Version::use_rop_protection()) {
7386 xpaclri();
7387 }
7388 }
7389
7390 #ifndef PRODUCT
7391 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
7392 // occur when the pointer is used, i.e. when the program returns to the invalid LR. At that point
7393 // it is difficult to debug back to the callee function.
7394 // This function simply loads from the address in the given register.
7395 // Use directly after authentication to catch authentication failures.
7396 // Also use before signing to check that the pointer is valid and hasn't already been signed.
7397 //
7398 void MacroAssembler::check_return_address(Register return_reg) {
7399 if (VM_Version::use_rop_protection()) {
7400 ldr(zr, Address(return_reg));
7401 }
7402 }
7403 #endif
7404
7405 // The java_calling_convention describes stack locations as ideal slots on
7406 // a frame with no ABI restrictions. Since we must observe ABI restrictions
7407 // (like the placement of the register window) the slots must be biased by
7408 // the following value.
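// For example (a sketch, assuming VMRegImpl::stack_slot_size of 4 bytes):
// incoming stack slot 0 maps to rfp + 16 in reg2offset_in() below,
// skipping the saved rfp/lr pair, which occupies two words and hence
// four stack slots.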
7409 static int reg2offset_in(VMReg r) {
7410 // Account for the saved rfp and lr
7411 // This should really be in_preserve_stack_slots
7412 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
7413 }
7414
7415 static int reg2offset_out(VMReg r) {
7416 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
7417 }
7418
7419 // On 64-bit we store integer-like items to the stack as 64-bit items
7420 // (AArch64 ABI) even though Java only stores 32 bits for a parameter.
7421 // On 32-bit it would simply be 32 bits, so this routine does 32->32 on
7422 // 32-bit and 32->64 on 64-bit.
7423 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
7424 if (src.first()->is_stack()) {
7425 if (dst.first()->is_stack()) {
7426 // stack to stack
7427 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7428 str(tmp, Address(sp, reg2offset_out(dst.first())));
7429 } else {
7430 // stack to reg
7431 ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7432 }
7433 } else if (dst.first()->is_stack()) {
7434 // reg to stack
7435 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7436 } else {
7437 if (dst.first() != src.first()) {
7438 sxtw(dst.first()->as_Register(), src.first()->as_Register());
7439 }
7440 }
7441 }
7442
7443 // An oop arg. Must pass a handle, not the oop itself.
7444 void MacroAssembler::object_move(
7445 OopMap* map,
7446 int oop_handle_offset,
7447 int framesize_in_slots,
7448 VMRegPair src,
7449 VMRegPair dst,
7450 bool is_receiver,
7451 int* receiver_offset) {
7452
7453 // Must pass a handle. First figure out the location we use as a handle.
7454
7455 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
7456
7457 // See if the oop is null; if it is, we need no handle.
7458
7459 if (src.first()->is_stack()) {
7460
7461 // Oop is already on the stack as an argument
7462 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
7463 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
7464 if (is_receiver) {
7465 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
7466 }
7467
7468 ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
7469 lea(rHandle, Address(rfp, reg2offset_in(src.first())));
7470 // conditionally move a null
7471 cmp(rscratch1, zr);
7472 csel(rHandle, zr, rHandle, Assembler::EQ);
7473 } else {
7474
7475 // The oop is in a register; we must store it to the space we reserve
7476 // on the stack for oop handles and pass a handle if the oop is non-null.
7477
7478 const Register rOop = src.first()->as_Register();
7479 int oop_slot;
7480 if (rOop == j_rarg0)
7481 oop_slot = 0;
7482 else if (rOop == j_rarg1)
7483 oop_slot = 1;
7484 else if (rOop == j_rarg2)
7485 oop_slot = 2;
7486 else if (rOop == j_rarg3)
7487 oop_slot = 3;
7488 else if (rOop == j_rarg4)
7489 oop_slot = 4;
7490 else if (rOop == j_rarg5)
7491 oop_slot = 5;
7492 else if (rOop == j_rarg6)
7493 oop_slot = 6;
7494 else {
7495 assert(rOop == j_rarg7, "wrong register");
7496 oop_slot = 7;
7497 }
7498
7499 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
7500 int offset = oop_slot*VMRegImpl::stack_slot_size;
7501
7502 map->set_oop(VMRegImpl::stack2reg(oop_slot));
7503 // Store oop in handle area, may be null
7504 str(rOop, Address(sp, offset));
7505 if (is_receiver) {
7506 *receiver_offset = offset;
7507 }
7508
7509 cmp(rOop, zr);
7510 lea(rHandle, Address(sp, offset));
7511 // conditionally move a null
7512 csel(rHandle, zr, rHandle, Assembler::EQ);
7513 }
7514
7515 // If the arg is on the stack, place it there; otherwise it is already in the correct register.
7516 if (dst.first()->is_stack()) {
7517 str(rHandle, Address(sp, reg2offset_out(dst.first())));
7518 }
7519 }
7520
7521 // A float arg. May need to move between a float register, an integer register, and the stack.
7522 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
7523 if (src.first()->is_stack()) {
7524 if (dst.first()->is_stack()) {
7525 ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
7526 strw(tmp, Address(sp, reg2offset_out(dst.first())));
7527 } else {
7528 ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7529 }
7530 } else if (src.first() != dst.first()) {
7531 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7532 fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7533 else
7534 strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7535 }
7536 }
7537
7538 // A long move
7539 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
7540 if (src.first()->is_stack()) {
7541 if (dst.first()->is_stack()) {
7542 // stack to stack
7543 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7544 str(tmp, Address(sp, reg2offset_out(dst.first())));
7545 } else {
7546 // stack to reg
7547 ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7548 }
7549 } else if (dst.first()->is_stack()) {
7550 // reg to stack
7551 // No sign extension is needed here: the long already occupies the
7552 // full 64-bit register.
7553 str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7554 } else {
7555 if (dst.first() != src.first()) {
7556 mov(dst.first()->as_Register(), src.first()->as_Register());
7557 }
7558 }
7559 }
7560
7561
7562 // A double move
7563 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
7564 if (src.first()->is_stack()) {
7565 if (dst.first()->is_stack()) {
7566 ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7567 str(tmp, Address(sp, reg2offset_out(dst.first())));
7568 } else {
7569 ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7570 }
7571 } else if (src.first() != dst.first()) {
7572 if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7573 fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7574 else
7575 strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7576 }
7577 }
7578
7579 // Implements lightweight-locking.
7580 //
7581 // - obj: the object to be locked
7582 // - t1, t2, t3: temporary registers, will be destroyed
7583 // - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
7584 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
7585 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
7586 assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
7587
7588 Label push;
7589 const Register top = t1;
7590 const Register mark = t2;
7591 const Register t = t3;
7592
7593 // Preload the markWord. It is important that this is the first
7594 // instruction emitted as it is part of C1's null check semantics.
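// In outline, the fast-locking path below is (a sketch):
//   1. take the slow path if the per-thread lock-stack is full;
//   2. if obj is already on top of the lock-stack, it is a recursive
//      lock: just push obj again;
//   3. take the slow path if the mark word already holds a monitor (0b10);
//   4. otherwise try to CAS the mark word lock bits 0b01 -> 0b00 and,
//      on success, push obj onto the lock-stack.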
7595 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7596
7597 if (UseObjectMonitorTable) {
7598 // Clear the cache in case fast locking succeeds.
7599 str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
7600 }
7601
7602 // Check if the lock-stack is full.
7603 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7604 cmpw(top, (unsigned)LockStack::end_offset());
7605 br(Assembler::GE, slow);
7606
7607 // Check for recursion.
7608 subw(t, top, oopSize);
7609 ldr(t, Address(rthread, t));
7610 cmp(obj, t);
7611 br(Assembler::EQ, push);
7612
7613 // Check the header for a monitor (0b10).
7614 tst(mark, markWord::monitor_value);
7615 br(Assembler::NE, slow);
7616
7617 // Try to lock. Transition lock bits 0b01 => 0b00
7618 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7619 orr(mark, mark, markWord::unlocked_value);
7620 if (EnableValhalla) {
7621 // Mask the inline_type bit so that we go to the slow path if the object is an inline type
7622 andr(mark, mark, ~((int) markWord::inline_type_bit_in_place));
7623 }
7624 eor(t, mark, markWord::unlocked_value);
7625 cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
7626 /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
7627 br(Assembler::NE, slow);
7628
7629 bind(push);
7630 // After a successful lock, push the object on the lock-stack.
7631 str(obj, Address(rthread, top));
7632 addw(top, top, oopSize);
7633 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7634 }
7635
7636 // Implements lightweight-unlocking.
7637 //
7638 // - obj: the object to be unlocked
7639 // - t1, t2, t3: temporary registers
7640 // - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
7641 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
7642 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
7643 // cmpxchg clobbers rscratch1.
7644 assert_different_registers(obj, t1, t2, t3, rscratch1);
7645
7646 #ifdef ASSERT
7647 {
7648 // Check for lock-stack underflow.
7649 Label stack_ok;
7650 ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7651 cmpw(t1, (unsigned)LockStack::start_offset());
7652 br(Assembler::GE, stack_ok);
7653 STOP("Lock-stack underflow");
7654 bind(stack_ok);
7655 }
7656 #endif
7657
7658 Label unlocked, push_and_slow;
7659 const Register top = t1;
7660 const Register mark = t2;
7661 const Register t = t3;
7662
7663 // Check if obj is on top of the lock-stack.
7664 ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7665 subw(top, top, oopSize);
7666 ldr(t, Address(rthread, top));
7667 cmp(obj, t);
7668 br(Assembler::NE, slow);
7669
7670 // Pop the lock-stack.
7671 DEBUG_ONLY(str(zr, Address(rthread, top));)
7672 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7673
7674 // Check if recursive.
7675 subw(t, top, oopSize);
7676 ldr(t, Address(rthread, t));
7677 cmp(obj, t);
7678 br(Assembler::EQ, unlocked);
7679
7680 // Not recursive. Check the header for a monitor (0b10).
7681 ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7682 tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
7683
7684 #ifdef ASSERT
7685 // Check that the header is not unlocked (0b01).
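// Mark word lock-bit encodings relied on here (see markWord.hpp):
// 0b01 = unlocked, 0b00 = fast-locked, 0b10 = inflated monitor.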
7686 Label not_unlocked;
7687 tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
7688 stop("lightweight_unlock already unlocked");
7689 bind(not_unlocked);
7690 #endif
7691
7692 // Try to unlock. Transition lock bits 0b00 => 0b01
7693 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7694 orr(t, mark, markWord::unlocked_value);
7695 cmpxchg(obj, mark, t, Assembler::xword,
7696 /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
7697 br(Assembler::EQ, unlocked);
7698
7699 bind(push_and_slow);
7700 // Restore the lock-stack and handle the unlock in the runtime.
7701 DEBUG_ONLY(str(obj, Address(rthread, top));)
7702 addw(top, top, oopSize);
7703 strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7704 b(slow);
7705
7706 bind(unlocked);
7707 }
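// In outline, the unlock fast path above mirrors lightweight_lock (a
// sketch): pop obj off the lock-stack; if the next entry is also obj, the
// lock was recursive and nothing else is needed; otherwise CAS the mark
// word lock bits 0b00 -> 0b01, and if the CAS fails (e.g. the lock was
// inflated concurrently) restore the lock-stack and take the slow path.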