1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#ifndef CPU_AARCH64_ASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_ASSEMBLER_AARCH64_HPP

#include "asm/register.hpp"
#include "metaprogramming/enableIf.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include <type_traits>

#ifdef __GNUC__

// __nop needs volatile so that compiler doesn't optimize it away
#define NOP() asm volatile ("nop");

#elif defined(_MSC_VER)

// Use MSVC intrinsic: https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019#I
#define NOP() __nop();

#endif


// definitions of various symbolic names for machine registers

// First, calls between C and Java, which use 8 general registers
// and 8 floating-point registers

// we also have to copy between x86 and ARM registers but that's a
// secondary complication -- not all code employing C call convention
// executes as x86 code though -- we generate some of it

// Counts of registers used for parameter passing in the C (AAPCS64)
// and Java calling conventions.  The two conventions use the same
// number of registers but map them differently (see the table below).
class Argument {
 public:
  enum {
    n_int_register_parameters_c   = 8,  // r0, r1, ... r7 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8,  // v0, v1, ... v7 (c_farg0, c_farg1, ... )

    n_int_register_parameters_j   = 8,  // r1, ... r7, r0 (j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8   // v0, v1, ... v7 (j_farg0, j_farg1, ...
  };
};

// C (native) integer argument registers, in AAPCS64 order.
constexpr Register c_rarg0 = r0;
constexpr Register c_rarg1 = r1;
constexpr Register c_rarg2 = r2;
constexpr Register c_rarg3 = r3;
constexpr Register c_rarg4 = r4;
constexpr Register c_rarg5 = r5;
constexpr Register c_rarg6 = r6;
constexpr Register c_rarg7 = r7;

// C (native) floating-point argument registers.
constexpr FloatRegister c_farg0 = v0;
constexpr FloatRegister c_farg1 = v1;
constexpr FloatRegister c_farg2 = v2;
constexpr FloatRegister c_farg3 = v3;
constexpr FloatRegister c_farg4 = v4;
constexpr FloatRegister c_farg5 = v5;
constexpr FloatRegister c_farg6 = v6;
constexpr FloatRegister c_farg7 = v7;

// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do much shuffling. A suitable jni method is non-static and a
// small number of arguments
//
// |--------------------------------------------------------------------|
// | c_rarg0  c_rarg1  c_rarg2  c_rarg3  c_rarg4  c_rarg5  c_rarg6  c_rarg7 |
// |--------------------------------------------------------------------|
// | r0       r1       r2       r3       r4       r5       r6       r7  |
// |--------------------------------------------------------------------|
// | j_rarg7  j_rarg0  j_rarg1  j_rarg2  j_rarg3  j_rarg4  j_rarg5  j_rarg6 |
// |--------------------------------------------------------------------|

// Java integer argument registers: shifted by one relative to the C
// convention (j_rargN == c_rarg(N+1), wrapping), per the table above.
constexpr Register j_rarg0 = c_rarg1;
constexpr Register j_rarg1 = c_rarg2;
constexpr Register j_rarg2 = c_rarg3;
constexpr Register j_rarg3 = c_rarg4;
constexpr Register j_rarg4 = c_rarg5;
constexpr Register j_rarg5 = c_rarg6;
constexpr Register j_rarg6 = c_rarg7;
constexpr Register j_rarg7 = c_rarg0;

// Java floating args are passed as per C

constexpr FloatRegister j_farg0 = v0;
constexpr FloatRegister j_farg1 = v1;
constexpr FloatRegister j_farg2 = v2;
constexpr FloatRegister j_farg3 = v3;
constexpr FloatRegister j_farg4 = v4;
constexpr FloatRegister j_farg5 = v5;
constexpr FloatRegister j_farg6 = v6;
constexpr FloatRegister j_farg7 = v7;

// registers used to hold VM data either temporarily within a method
// or across method calls

// volatile (caller-save) registers

// r8 is used for indirect result location return
// we use it and r9 as scratch registers
constexpr Register rscratch1 = r8;
constexpr Register rscratch2 = r9;

// current method -- must be in a call-clobbered register
constexpr Register rmethod = r12;

// non-volatile (callee-save) registers are r16-29
// of which the following are dedicated global state

constexpr Register lr            = r30; // link register
constexpr Register rfp           = r29; // frame pointer
constexpr Register rthread       = r28; // current thread
constexpr Register rheapbase     = r27; // base of heap
constexpr Register rcpool        = r26; // constant pool cache
constexpr Register rlocals       = r24; // locals on stack
constexpr Register rbcp          = r22; // bytecode pointer
constexpr Register rdispatch     = r21; // dispatch table base
constexpr Register esp           = r20; // Java expression stack pointer
constexpr Register r19_sender_sp = r19; // sender's SP while in interpreter

// Preserved predicate register with all elements set TRUE.
constexpr PRegister ptrue = p7;

// assert() with the condition's source text as the message.
#define assert_cond(ARG1) assert(ARG1, #ARG1)

namespace asm_util {
  // Encode an immediate for an A64 logical instruction (bitmask
  // immediate form); 32- or 64-bit variant selected by is32.
  uint32_t encode_logical_immediate(bool is32, uint64_t imm);
  // Same, for SVE logical instructions with the given element width.
  uint32_t encode_sve_logical_immediate(unsigned elembits, uint64_t imm);
  // Can imm be represented in a signed field of nbits bits?
  bool operand_valid_for_immediate_bits(int64_t imm, unsigned nbits);
};

using namespace asm_util;


class Assembler;

// Accumulator for one 32-bit A64 instruction.  Fields are OR-ed in via
// f()/sf() and friends; in ASSERT builds, 'bits' tracks which bit
// positions have been set so that double-writes are caught.  The
// destructor (defined elsewhere) presumably emits 'insn' through
// 'assem' -- confirm in the .inline.hpp.
class Instruction_aarch64 {
  unsigned insn;          // the instruction word being assembled
#ifdef ASSERT
  unsigned bits;          // mask of bit positions already filled (debug only)
#endif
  Assembler *assem;       // assembler to emit into

public:

  Instruction_aarch64(class Assembler *as) {
#ifdef ASSERT
    bits = 0;
#endif
    insn = 0;
    assem = as;
  }

  inline ~Instruction_aarch64();

  unsigned &get_insn() { return insn; }
#ifdef ASSERT
  unsigned &get_bits() { return bits; }
#endif

  // Sign-extend the bitfield val[hi:lo] to 32 bits.  Implemented by
  // shifting the field to the top of a word and arithmetic-shifting
  // back down (the union provides the signed reinterpretation).
  static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) {
    union {
      unsigned u;
      int n;
    };

    u = val << (31 - hi);
    n = n >> (31 - hi + lo);
    return n;
  }

  // Extract the (zero-extended) bitfield val[msb:lsb].
  static inline uint32_t extract(uint32_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    assert_cond(msb >= lsb);
    uint32_t mask = checked_cast<uint32_t>(right_n_bits(nbits));
    uint32_t result = val >> lsb;
    result &= mask;
    return result;
  }

  // Extract the bitfield val[msb:lsb] and sign-extend it.
  static inline int32_t sextract(uint32_t val, int msb, int lsb) {
    uint32_t uval = extract(val, msb, lsb);
    return extend(uval, msb - lsb);
  }

  // Patch the unsigned field [msb:lsb] of the instruction word at
  // address a with val, leaving the other bits untouched.
  static ALWAYSINLINE void patch(address a, int msb, int lsb, uint64_t val) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1ULL << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    val <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= (unsigned)val;
    *(unsigned *)a = target;
  }

  // As patch(), but val is signed: guarantee it fits in nbits bits
  // (all dropped high bits must equal the sign bit), then store its
  // low nbits bits.
  static void spatch(address a, int msb, int lsb, int64_t val) {
    int nbits = msb - lsb + 1;
    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn at " INTPTR_FORMAT, p2i(a));
    uint64_t uval = val;
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    uval &= mask;
    uval <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= (unsigned)uval;
    *(unsigned *)a = target;
  }

  // OR the unsigned field val into insn[msb:lsb].  In ASSERT builds,
  // verify no previously-set bit is written twice.
  void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1ULL << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    val <<= lsb;
    insn |= val;
#ifdef ASSERT
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    mask <<= lsb;
    assert_cond((bits & mask) == 0);
    bits |= mask;
#endif
  }

  // Single-bit convenience form of f().
  void f(unsigned val, int bit) {
    f(val, bit, bit);
  }

  // Signed-field form of f(): range-check val as a signed nbits-bit
  // quantity, then insert its low bits.
  void sf(int64_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    uint64_t uval = val;
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    uval &= mask;
    f((unsigned)uval, lsb + nbits - 1, lsb);
  }

  // Insert a general register number into the 5-bit field at lsb.
  void rf(Register r, int lsb) {
    f(r->raw_encoding(), lsb + 4, lsb);
  }

  // reg|ZR: as rf(), but zr is folded onto encoding 31
  // (zr's raw encoding is presumably 32 -- the subtraction maps it to 31).
  void zrf(Register r, int lsb) {
    f(r->raw_encoding() - (r == zr), lsb + 4, lsb);
  }

  // reg|SP: as rf(), but sp is encoded as 31.
  void srf(Register r, int lsb) {
    f(r == sp ? 31 : r->raw_encoding(), lsb + 4, lsb);
  }

  // Insert a floating-point/SIMD register number (5-bit field).
  void rf(FloatRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 4, lsb);
  }

  // Insert an SVE predicate register (4-bit field).
  void prf(PRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 3, lsb);
  }

  // Insert an SVE governing predicate register (3-bit field, p0-p7).
  void pgrf(PRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 2, lsb);
  }

  // Read back insn[msb:lsb]; in ASSERT builds the field must already
  // have been filled in.
  unsigned get(int msb = 31, int lsb = 0) {
    int nbits = msb - lsb + 1;
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits)) << lsb;
    assert_cond((bits & mask) == mask);
    return (insn & mask) >> lsb;
  }
};

// Start assembling an instruction in the current scope.
#define starti Instruction_aarch64 current_insn(this);

// Base for pre-/post-indexed addressing wrappers: a base register and
// an immediate offset.
class PrePost {
  int _offset;
  Register _r;
protected:
  PrePost(Register reg, int o) : _offset(o), _r(reg) { }
  ~PrePost() = default;
  PrePost(const PrePost&) = default;
  PrePost& operator=(const PrePost&) = default;
public:
  int offset() const { return _offset; }
  Register reg() const { return _r; }
};

// Pre-indexed addressing: [reg, #offset]!
class Pre : public PrePost {
public:
  Pre(Register reg, int o) : PrePost(reg, o) { }
};

// Post-indexed addressing: [reg], #offset or [reg], idx.
class Post : public PrePost {
  Register _idx;        // post-index register, if any
  bool _is_postreg;     // true when indexed by a register, not an immediate
public:
  Post(Register reg, int o) : PrePost(reg, o), _idx(noreg), _is_postreg(false) {}
  Post(Register reg, Register idx) : PrePost(reg, 0), _idx(idx), _is_postreg(true) {}
  Register idx_reg() const { return _idx; }
  bool is_postreg() const { return _is_postreg; }
};

// Register extend operations for extended-register operands.
namespace ext
{
  enum operation { uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx };
};

// Addressing modes
class Address {
 public:

  enum mode { no_mode, base_plus_offset, pre, post, post_reg,
              base_plus_offset_reg, literal };

  // Shift and extend for base reg + reg offset addressing
  class extend {
    int _option, _shift;
    ext::operation _op;
  public:
    // NOTE: the default constructor leaves all members uninitialized;
    // such a value is only used as a dummy (e.g. no_mode Addresses).
    extend() { }
    extend(int s, int o, ext::operation op) : _option(o), _shift(s), _op(op) { }
    int option() const { return _option; }
    int shift() const { return _shift; }
    ext::operation op() const { return _op; }
  };

  // Factories for the supported extend/shift kinds; 'shift' of -1
  // means "no shift specified".
  static extend uxtw(int shift = -1) { return extend(shift, 0b010, ext::uxtw); }
  static extend lsl(int shift = -1)  { return extend(shift, 0b011, ext::uxtx); }
  static extend sxtw(int shift = -1) { return extend(shift, 0b110, ext::sxtw); }
  static extend sxtx(int shift = -1) { return extend(shift, 0b111, ext::sxtx); }

 private:
  // Payload for every non-literal mode.
  struct Nonliteral {
    Nonliteral(Register base, Register index, int64_t offset, extend ext = extend())
      : _base(base), _index(index), _offset(offset), _ext(ext) {}
    Register _base;
    Register _index;
    int64_t _offset;
    extend _ext;
  };

  // Payload for literal (PC-relative or materialized) addresses.
  struct Literal {
    Literal(address target, const RelocationHolder& rspec)
      : _target(target), _rspec(rspec) {}

    // If the target is far we'll need to load the ea of this to a
    // register to reach it. Otherwise if near we can do PC-relative
    // addressing.
    address _target;

    RelocationHolder _rspec;
  };

  void assert_is_nonliteral() const NOT_DEBUG_RETURN;
  void assert_is_literal() const NOT_DEBUG_RETURN;

  // Discriminated union, based on _mode.
  // - no_mode: uses dummy _nonliteral, for ease of copying.
  // - literal: only _literal is used.
  // - others: only _nonliteral is used.
  enum mode _mode;
  union {
    Nonliteral _nonliteral;
    Literal _literal;
  };

  // Helper for copy constructor and assignment operator.
  // Copy mode-relevant part of a into this.
  void copy_data(const Address& a) {
    assert(_mode == a._mode, "precondition");
    if (_mode == literal) {
      new (&_literal) Literal(a._literal);
    } else {
      // non-literal mode or no_mode.
      new (&_nonliteral) Nonliteral(a._nonliteral);
    }
  }

 public:
  // no_mode initializes _nonliteral for ease of copying.
  Address() :
    _mode(no_mode),
    _nonliteral(noreg, noreg, 0)
  {}

  // [base] with zero offset.
  Address(Register r) :
    _mode(base_plus_offset),
    _nonliteral(r, noreg, 0)
  {}

  // [base, #imm] for any integral offset type.
  template<typename T, ENABLE_IF(std::is_integral<T>::value)>
  Address(Register r, T o) :
    _mode(base_plus_offset),
    _nonliteral(r, noreg, o)
  {}

  Address(Register r, ByteSize disp) : Address(r, in_bytes(disp)) {}

  // [base, index, extend] -- register-offset addressing.
  Address(Register r, Register r1, extend ext = lsl()) :
    _mode(base_plus_offset_reg),
    _nonliteral(r, r1, 0, ext)
  {}

  // Pre-indexed: [base, #imm]!
  Address(Pre p) :
    _mode(pre),
    _nonliteral(p.reg(), noreg, p.offset())
  {}

  // Post-indexed: [base], #imm or [base], Xm.
  Address(Post p) :
    _mode(p.is_postreg() ? post_reg : post),
    _nonliteral(p.reg(), p.idx_reg(), p.offset())
  {}

  // Literal address with explicit relocation.
  Address(address target, const RelocationHolder& rspec) :
    _mode(literal),
    _literal(target, rspec)
  {}

  Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);

  // Register-or-constant index: register form becomes
  // base_plus_offset_reg; constant form is folded into the immediate
  // offset (shifted by the extend's shift, which must be uxtx/lsl).
  Address(Register base, RegisterOrConstant index, extend ext = lsl()) {
    if (index.is_register()) {
      _mode = base_plus_offset_reg;
      new (&_nonliteral) Nonliteral(base, index.as_register(), 0, ext);
    } else {
      guarantee(ext.option() == ext::uxtx, "should be");
      assert(index.is_constant(), "should be");
      _mode = base_plus_offset;
      new (&_nonliteral) Nonliteral(base,
                                    noreg,
                                    index.as_constant() << ext.shift());
    }
  }

  Address(const Address& a) : _mode(a._mode) { copy_data(a); }

  // Verify the value is trivially destructible regardless of mode, so our
  // destructor can also be trivial, and so our assignment operator doesn't
  // need to destruct the old value before copying over it.
  static_assert(std::is_trivially_destructible<Literal>::value, "must be");
  static_assert(std::is_trivially_destructible<Nonliteral>::value, "must be");

  Address& operator=(const Address& a) {
    _mode = a._mode;
    copy_data(a);
    return *this;
  }

  ~Address() = default;

  // Accessors below assert the active union member matches the mode.
  Register base() const {
    assert_is_nonliteral();
    return _nonliteral._base;
  }

  int64_t offset() const {
    assert_is_nonliteral();
    return _nonliteral._offset;
  }

  Register index() const {
    assert_is_nonliteral();
    return _nonliteral._index;
  }

  extend ext() const {
    assert_is_nonliteral();
    return _nonliteral._ext;
  }

  mode getMode() const {
    return _mode;
  }

  // Does this address read 'reg' (as base or index)?
  bool uses(Register reg) const {
    switch (_mode) {
    case literal:
    case no_mode:
      return false;
    case base_plus_offset:
    case base_plus_offset_reg:
    case pre:
    case post:
    case post_reg:
      return base() == reg || index() == reg;
    default:
      ShouldNotReachHere();
      return false;
    }
  }

  address target() const {
    assert_is_literal();
    return _literal._target;
  }

  const RelocationHolder& rspec() const {
    assert_is_literal();
    return _literal._rspec;
  }

  // Fill in the addressing-mode fields of a load/store instruction.
  // Chooses between the unscaled signed-9-bit-immediate form and the
  // scaled unsigned-12-bit-immediate form for base_plus_offset.
  void encode(Instruction_aarch64 *i) const {
    i->f(0b111, 29, 27);
    i->srf(base(), 5);

    switch(_mode) {
    case base_plus_offset:
      {
        unsigned size = i->get(31, 30);
        if (i->get(26, 26) && i->get(23, 23)) {
          // SIMD Q Type - Size = 128 bits
          assert(size == 0, "bad size");
          size = 0b100;
        }
        assert(offset_ok_for_immed(offset(), size),
               "must be, was: " INT64_FORMAT ", %d", offset(), size);
        unsigned mask = (1 << size) - 1;
        if (offset() < 0 || offset() & mask) {
          // Unscaled signed 9-bit immediate (LDUR/STUR form).
          i->f(0b00, 25, 24);
          i->f(0, 21), i->f(0b00, 11, 10);
          i->sf(offset(), 20, 12);
        } else {
          // Scaled unsigned 12-bit immediate.
          i->f(0b01, 25, 24);
          i->f(checked_cast<unsigned>(offset() >> size), 21, 10);
        }
      }
      break;

    case base_plus_offset_reg:
      {
        i->f(0b00, 25, 24);
        i->f(1, 21);
        i->rf(index(), 16);
        i->f(ext().option(), 15, 13);
        unsigned size = i->get(31, 30);
        if (i->get(26, 26) && i->get(23, 23)) {
          // SIMD Q Type - Size = 128 bits
          assert(size == 0, "bad size");
          size = 0b100;
        }
        // S bit: whether the index is scaled by the access size.
        if (size == 0) // It's a byte
          i->f(ext().shift() >= 0, 12);
        else {
          guarantee(ext().shift() <= 0 || ext().shift() == (int)size, "bad shift");
          i->f(ext().shift() > 0, 12);
        }
        i->f(0b10, 11, 10);
      }
      break;

    case pre:
      i->f(0b00, 25, 24);
      i->f(0, 21), i->f(0b11, 11, 10);
      i->sf(offset(), 20, 12);
      break;

    case post:
      i->f(0b00, 25, 24);
      i->f(0, 21), i->f(0b01, 11, 10);
      i->sf(offset(), 20, 12);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // Fill in the addressing fields of a load/store-pair instruction.
  // The 7-bit immediate is scaled by the operand size.
  void encode_pair(Instruction_aarch64 *i) const {
    switch(_mode) {
    case base_plus_offset:
      i->f(0b010, 25, 23);
      break;
    case pre:
      i->f(0b011, 25, 23);
      break;
    case post:
      i->f(0b001, 25, 23);
      break;
    default:
      ShouldNotReachHere();
    }

    unsigned size; // Operand shift in 32-bit words

    if (i->get(26, 26)) { // float
      switch(i->get(31, 30)) {
      case 0b10:
        size = 2; break;
      case 0b01:
        size = 1; break;
      case 0b00:
        size = 0; break;
      default:
        ShouldNotReachHere();
        size = 0; // unreachable
      }
    } else {
      size = i->get(31, 31);
    }

    size = 4 << size;
    guarantee(offset() % size == 0, "bad offset");
    i->sf(offset() / size, 21, 15);
    i->srf(base(), 5);
  }

  // Non-temporal pair form: only base_plus_offset is legal.
  void encode_nontemporal_pair(Instruction_aarch64 *i) const {
    guarantee(_mode == base_plus_offset, "Bad addressing mode for nontemporal op");
    i->f(0b000, 25, 23);
    unsigned size = i->get(31, 31);
    size = 4 << size;
    guarantee(offset() % size == 0, "bad offset");
    i->sf(offset() / size, 21, 15);
    i->srf(base(), 5);
  }

  // Materialize this address's effective address into a register.
  void lea(MacroAssembler *, Register) const;

  // Can 'offset' be encoded as an immediate for an access of size
  // (1 << shift) bytes?
  static bool offset_ok_for_immed(int64_t offset, uint shift);

  // Can 'offset' be encoded as a signed "MUL VL" immediate of 'shift'
  // bits for an SVE access with vector length vl?
  static bool offset_ok_for_sve_immed(int64_t offset, int shift, int vl /* sve vector length */) {
    if (offset % vl == 0) {
      // Convert address offset into sve imm offset (MUL VL).
      int64_t sve_offset = offset / vl;
      int32_t range = 1 << (shift - 1);
      if ((-range <= sve_offset) && (sve_offset < range)) {
        // sve_offset can be encoded
        return true;
      }
    }
    return false;
  }
};

// Convenience classes

// Address of a runtime call target.
class RuntimeAddress: public Address {

 public:

  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}

};

// Address carrying an oop relocation.
class OopAddress: public Address {

 public:

  OopAddress(address target) : Address(target, relocInfo::oop_type){}

};

class ExternalAddress: public Address {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ?
relocInfo::external_word_type : relocInfo::none; 693 } 694 695 public: 696 697 ExternalAddress(address target) : Address(target, reloc_for_target(target)) {} 698 699 }; 700 701 class InternalAddress: public Address { 702 703 public: 704 705 InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {} 706 }; 707 708 const int FPUStateSizeInWords = FloatRegister::number_of_registers * FloatRegister::save_slots_per_register; 709 710 typedef enum { 711 PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM, 712 PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM, 713 PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM 714 } prfop; 715 716 class Assembler : public AbstractAssembler { 717 718 public: 719 720 #ifndef PRODUCT 721 static const uintptr_t asm_bp; 722 723 void emit_int32(jint x) { 724 if ((uintptr_t)pc() == asm_bp) 725 NOP(); 726 AbstractAssembler::emit_int32(x); 727 } 728 #else 729 void emit_int32(jint x) { 730 AbstractAssembler::emit_int32(x); 731 } 732 #endif 733 734 enum { instruction_size = 4 }; 735 736 //---< calculate length of instruction >--- 737 // We just use the values set above. 
738 // instruction must start at passed address 739 static unsigned int instr_len(unsigned char *instr) { return instruction_size; } 740 741 //---< longest instructions >--- 742 static unsigned int instr_maxlen() { return instruction_size; } 743 744 Address adjust(Register base, int offset, bool preIncrement) { 745 if (preIncrement) 746 return Address(Pre(base, offset)); 747 else 748 return Address(Post(base, offset)); 749 } 750 751 Address pre(Register base, int offset) { 752 return adjust(base, offset, true); 753 } 754 755 Address post(Register base, int offset) { 756 return adjust(base, offset, false); 757 } 758 759 Address post(Register base, Register idx) { 760 return Address(Post(base, idx)); 761 } 762 763 static address locate_next_instruction(address inst); 764 765 #define f current_insn.f 766 #define sf current_insn.sf 767 #define rf current_insn.rf 768 #define srf current_insn.srf 769 #define zrf current_insn.zrf 770 #define prf current_insn.prf 771 #define pgrf current_insn.pgrf 772 773 typedef void (Assembler::* uncond_branch_insn)(address dest); 774 typedef void (Assembler::* compare_and_branch_insn)(Register Rt, address dest); 775 typedef void (Assembler::* test_and_branch_insn)(Register Rt, int bitpos, address dest); 776 typedef void (Assembler::* prefetch_insn)(address target, prfop); 777 778 void wrap_label(Label &L, uncond_branch_insn insn); 779 void wrap_label(Register r, Label &L, compare_and_branch_insn insn); 780 void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn); 781 void wrap_label(Label &L, prfop, prefetch_insn insn); 782 783 // PC-rel. 
addressing 784 785 void adr(Register Rd, address dest); 786 void _adrp(Register Rd, address dest); 787 788 void adr(Register Rd, const Address &dest); 789 void _adrp(Register Rd, const Address &dest); 790 791 void adr(Register Rd, Label &L) { 792 wrap_label(Rd, L, &Assembler::Assembler::adr); 793 } 794 void _adrp(Register Rd, Label &L) { 795 wrap_label(Rd, L, &Assembler::_adrp); 796 } 797 798 void adrp(Register Rd, const Address &dest, uint64_t &offset) = delete; 799 800 void prfm(const Address &adr, prfop pfop = PLDL1KEEP); 801 802 #undef INSN 803 804 void add_sub_immediate(Instruction_aarch64 ¤t_insn, Register Rd, Register Rn, 805 unsigned uimm, int op, int negated_op); 806 807 // Add/subtract (immediate) 808 #define INSN(NAME, decode, negated) \ 809 void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \ 810 starti; \ 811 f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \ 812 zrf(Rd, 0), srf(Rn, 5); \ 813 } \ 814 \ 815 void NAME(Register Rd, Register Rn, unsigned imm) { \ 816 starti; \ 817 add_sub_immediate(current_insn, Rd, Rn, imm, decode, negated); \ 818 } 819 820 INSN(addsw, 0b001, 0b011); 821 INSN(subsw, 0b011, 0b001); 822 INSN(adds, 0b101, 0b111); 823 INSN(subs, 0b111, 0b101); 824 825 #undef INSN 826 827 #define INSN(NAME, decode, negated) \ 828 void NAME(Register Rd, Register Rn, unsigned imm) { \ 829 starti; \ 830 add_sub_immediate(current_insn, Rd, Rn, imm, decode, negated); \ 831 } 832 833 INSN(addw, 0b000, 0b010); 834 INSN(subw, 0b010, 0b000); 835 INSN(add, 0b100, 0b110); 836 INSN(sub, 0b110, 0b100); 837 838 #undef INSN 839 840 // Logical (immediate) 841 #define INSN(NAME, decode, is32) \ 842 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 843 starti; \ 844 uint32_t val = encode_logical_immediate(is32, imm); \ 845 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 846 srf(Rd, 0), zrf(Rn, 5); \ 847 } 848 849 INSN(andw, 0b000, true); 850 INSN(orrw, 0b001, true); 851 INSN(eorw, 0b010, true); 852 
INSN(andr, 0b100, false); 853 INSN(orr, 0b101, false); 854 INSN(eor, 0b110, false); 855 856 #undef INSN 857 858 #define INSN(NAME, decode, is32) \ 859 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 860 starti; \ 861 uint32_t val = encode_logical_immediate(is32, imm); \ 862 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 863 zrf(Rd, 0), zrf(Rn, 5); \ 864 } 865 866 INSN(ands, 0b111, false); 867 INSN(andsw, 0b011, true); 868 869 #undef INSN 870 871 // Move wide (immediate) 872 #define INSN(NAME, opcode) \ 873 void NAME(Register Rd, unsigned imm, unsigned shift = 0) { \ 874 assert_cond((shift/16)*16 == shift); \ 875 starti; \ 876 f(opcode, 31, 29), f(0b100101, 28, 23), f(shift/16, 22, 21), \ 877 f(imm, 20, 5); \ 878 zrf(Rd, 0); \ 879 } 880 881 INSN(movnw, 0b000); 882 INSN(movzw, 0b010); 883 INSN(movkw, 0b011); 884 INSN(movn, 0b100); 885 INSN(movz, 0b110); 886 INSN(movk, 0b111); 887 888 #undef INSN 889 890 // Bitfield 891 #define INSN(NAME, opcode, size) \ 892 void NAME(Register Rd, Register Rn, unsigned immr, unsigned imms) { \ 893 starti; \ 894 guarantee(size == 1 || (immr < 32 && imms < 32), "incorrect immr/imms");\ 895 f(opcode, 31, 22), f(immr, 21, 16), f(imms, 15, 10); \ 896 zrf(Rn, 5), rf(Rd, 0); \ 897 } 898 899 INSN(sbfmw, 0b0001001100, 0); 900 INSN(bfmw, 0b0011001100, 0); 901 INSN(ubfmw, 0b0101001100, 0); 902 INSN(sbfm, 0b1001001101, 1); 903 INSN(bfm, 0b1011001101, 1); 904 INSN(ubfm, 0b1101001101, 1); 905 906 #undef INSN 907 908 // Extract 909 #define INSN(NAME, opcode, size) \ 910 void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \ 911 starti; \ 912 guarantee(size == 1 || imms < 32, "incorrect imms"); \ 913 f(opcode, 31, 21), f(imms, 15, 10); \ 914 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 915 } 916 917 INSN(extrw, 0b00010011100, 0); 918 INSN(extr, 0b10010011110, 1); 919 920 #undef INSN 921 922 // The maximum range of a branch is fixed for the AArch64 923 // architecture. 
In debug mode we shrink it in order to test 924 // trampolines, but not so small that branches in the interpreter 925 // are out of range. 926 static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M); 927 928 static bool reachable_from_branch_at(address branch, address target) { 929 return uabs(target - branch) < branch_range; 930 } 931 932 // Unconditional branch (immediate) 933 #define INSN(NAME, opcode) \ 934 void NAME(address dest) { \ 935 starti; \ 936 int64_t offset = (dest - pc()) >> 2; \ 937 DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \ 938 f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \ 939 } \ 940 void NAME(Label &L) { \ 941 wrap_label(L, &Assembler::NAME); \ 942 } \ 943 void NAME(const Address &dest); 944 945 INSN(b, 0); 946 INSN(bl, 1); 947 948 #undef INSN 949 950 // Compare & branch (immediate) 951 #define INSN(NAME, opcode) \ 952 void NAME(Register Rt, address dest) { \ 953 int64_t offset = (dest - pc()) >> 2; \ 954 starti; \ 955 f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \ 956 } \ 957 void NAME(Register Rt, Label &L) { \ 958 wrap_label(Rt, L, &Assembler::NAME); \ 959 } 960 961 INSN(cbzw, 0b00110100); 962 INSN(cbnzw, 0b00110101); 963 INSN(cbz, 0b10110100); 964 INSN(cbnz, 0b10110101); 965 966 #undef INSN 967 968 // Test & branch (immediate) 969 #define INSN(NAME, opcode) \ 970 void NAME(Register Rt, int bitpos, address dest) { \ 971 int64_t offset = (dest - pc()) >> 2; \ 972 int b5 = bitpos >> 5; \ 973 bitpos &= 0x1f; \ 974 starti; \ 975 f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \ 976 rf(Rt, 0); \ 977 } \ 978 void NAME(Register Rt, int bitpos, Label &L) { \ 979 wrap_label(Rt, bitpos, L, &Assembler::NAME); \ 980 } 981 982 INSN(tbz, 0b0110110); 983 INSN(tbnz, 0b0110111); 984 985 #undef INSN 986 987 // Conditional branch (immediate) 988 enum Condition 989 {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV}; 990 991 void br(Condition cond, 
address dest) { 992 int64_t offset = (dest - pc()) >> 2; 993 starti; 994 f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0); 995 } 996 997 #define INSN(NAME, cond) \ 998 void NAME(address dest) { \ 999 br(cond, dest); \ 1000 } 1001 1002 INSN(beq, EQ); 1003 INSN(bne, NE); 1004 INSN(bhs, HS); 1005 INSN(bcs, CS); 1006 INSN(blo, LO); 1007 INSN(bcc, CC); 1008 INSN(bmi, MI); 1009 INSN(bpl, PL); 1010 INSN(bvs, VS); 1011 INSN(bvc, VC); 1012 INSN(bhi, HI); 1013 INSN(bls, LS); 1014 INSN(bge, GE); 1015 INSN(blt, LT); 1016 INSN(bgt, GT); 1017 INSN(ble, LE); 1018 INSN(bal, AL); 1019 INSN(bnv, NV); 1020 1021 void br(Condition cc, Label &L); 1022 1023 #undef INSN 1024 1025 // Exception generation 1026 void generate_exception(int opc, int op2, int LL, unsigned imm) { 1027 starti; 1028 f(0b11010100, 31, 24); 1029 f(opc, 23, 21), f(imm, 20, 5), f(op2, 4, 2), f(LL, 1, 0); 1030 } 1031 1032 #define INSN(NAME, opc, op2, LL) \ 1033 void NAME(unsigned imm) { \ 1034 generate_exception(opc, op2, LL, imm); \ 1035 } 1036 1037 INSN(svc, 0b000, 0, 0b01); 1038 INSN(hvc, 0b000, 0, 0b10); 1039 INSN(smc, 0b000, 0, 0b11); 1040 INSN(brk, 0b001, 0, 0b00); 1041 INSN(hlt, 0b010, 0, 0b00); 1042 INSN(dcps1, 0b101, 0, 0b01); 1043 INSN(dcps2, 0b101, 0, 0b10); 1044 INSN(dcps3, 0b101, 0, 0b11); 1045 1046 #undef INSN 1047 1048 // System 1049 void system(int op0, int op1, int CRn, int CRm, int op2, 1050 Register rt = dummy_reg) 1051 { 1052 starti; 1053 f(0b11010101000, 31, 21); 1054 f(op0, 20, 19); 1055 f(op1, 18, 16); 1056 f(CRn, 15, 12); 1057 f(CRm, 11, 8); 1058 f(op2, 7, 5); 1059 rf(rt, 0); 1060 } 1061 1062 // Hint instructions 1063 1064 #define INSN(NAME, crm, op2) \ 1065 void NAME() { \ 1066 system(0b00, 0b011, 0b0010, crm, op2); \ 1067 } 1068 1069 INSN(nop, 0b000, 0b0000); 1070 INSN(yield, 0b000, 0b0001); 1071 INSN(wfe, 0b000, 0b0010); 1072 INSN(wfi, 0b000, 0b0011); 1073 INSN(sev, 0b000, 0b0100); 1074 INSN(sevl, 0b000, 0b0101); 1075 1076 INSN(autia1716, 0b0001, 0b100); 1077 
INSN(autiasp, 0b0011, 0b101); 1078 INSN(autiaz, 0b0011, 0b100); 1079 INSN(autib1716, 0b0001, 0b110); 1080 INSN(autibsp, 0b0011, 0b111); 1081 INSN(autibz, 0b0011, 0b110); 1082 INSN(pacia1716, 0b0001, 0b000); 1083 INSN(paciasp, 0b0011, 0b001); 1084 INSN(paciaz, 0b0011, 0b000); 1085 INSN(pacib1716, 0b0001, 0b010); 1086 INSN(pacibsp, 0b0011, 0b011); 1087 INSN(pacibz, 0b0011, 0b010); 1088 INSN(xpaclri, 0b0000, 0b111); 1089 1090 #undef INSN 1091 1092 // we only provide mrs and msr for the special purpose system 1093 // registers where op1 (instr[20:19]) == 11 1094 // n.b msr has L (instr[21]) == 0 mrs has L == 1 1095 1096 void msr(int op1, int CRn, int CRm, int op2, Register rt) { 1097 starti; 1098 f(0b1101010100011, 31, 19); 1099 f(op1, 18, 16); 1100 f(CRn, 15, 12); 1101 f(CRm, 11, 8); 1102 f(op2, 7, 5); 1103 // writing zr is ok 1104 zrf(rt, 0); 1105 } 1106 1107 void mrs(int op1, int CRn, int CRm, int op2, Register rt) { 1108 starti; 1109 f(0b1101010100111, 31, 19); 1110 f(op1, 18, 16); 1111 f(CRn, 15, 12); 1112 f(CRm, 11, 8); 1113 f(op2, 7, 5); 1114 // reading to zr is a mistake 1115 rf(rt, 0); 1116 } 1117 1118 enum barrier {OSHLD = 0b0001, OSHST, OSH, NSHLD=0b0101, NSHST, NSH, 1119 ISHLD = 0b1001, ISHST, ISH, LD=0b1101, ST, SY}; 1120 1121 void dsb(barrier imm) { 1122 system(0b00, 0b011, 0b00011, imm, 0b100); 1123 } 1124 1125 void dmb(barrier imm) { 1126 system(0b00, 0b011, 0b00011, imm, 0b101); 1127 } 1128 1129 void isb() { 1130 system(0b00, 0b011, 0b00011, SY, 0b110); 1131 } 1132 1133 void sys(int op1, int CRn, int CRm, int op2, 1134 Register rt = as_Register(0b11111)) { 1135 system(0b01, op1, CRn, CRm, op2, rt); 1136 } 1137 1138 // Only implement operations accessible from EL0 or higher, i.e., 1139 // op1 CRn CRm op2 1140 // IC IVAU 3 7 5 1 1141 // DC CVAC 3 7 10 1 1142 // DC CVAP 3 7 12 1 1143 // DC CVAU 3 7 11 1 1144 // DC CIVAC 3 7 14 1 1145 // DC ZVA 3 7 4 1 1146 // So only deal with the CRm field. 
1147 enum icache_maintenance {IVAU = 0b0101}; 1148 enum dcache_maintenance {CVAC = 0b1010, CVAP = 0b1100, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100}; 1149 1150 void dc(dcache_maintenance cm, Register Rt) { 1151 sys(0b011, 0b0111, cm, 0b001, Rt); 1152 } 1153 1154 void ic(icache_maintenance cm, Register Rt) { 1155 sys(0b011, 0b0111, cm, 0b001, Rt); 1156 } 1157 1158 // A more convenient access to dmb for our purposes 1159 enum Membar_mask_bits { 1160 // We can use ISH for a barrier because the Arm ARM says "This 1161 // architecture assumes that all Processing Elements that use the 1162 // same operating system or hypervisor are in the same Inner 1163 // Shareable shareability domain." 1164 StoreStore = ISHST, 1165 LoadStore = ISHLD, 1166 LoadLoad = ISHLD, 1167 StoreLoad = ISH, 1168 AnyAny = ISH 1169 }; 1170 1171 void membar(Membar_mask_bits order_constraint) { 1172 dmb(Assembler::barrier(order_constraint)); 1173 } 1174 1175 // Unconditional branch (register) 1176 1177 void branch_reg(int OP, int A, int M, Register RN, Register RM) { 1178 starti; 1179 f(0b1101011, 31, 25); 1180 f(OP, 24, 21); 1181 f(0b111110000, 20, 12); 1182 f(A, 11, 11); 1183 f(M, 10, 10); 1184 rf(RN, 5); 1185 rf(RM, 0); 1186 } 1187 1188 #define INSN(NAME, opc) \ 1189 void NAME(Register RN) { \ 1190 branch_reg(opc, 0, 0, RN, r0); \ 1191 } 1192 1193 INSN(br, 0b0000); 1194 INSN(blr, 0b0001); 1195 INSN(ret, 0b0010); 1196 1197 void ret(void *p); // This forces a compile-time error for ret(0) 1198 1199 #undef INSN 1200 1201 #define INSN(NAME, opc) \ 1202 void NAME() { \ 1203 branch_reg(opc, 0, 0, dummy_reg, r0); \ 1204 } 1205 1206 INSN(eret, 0b0100); 1207 INSN(drps, 0b0101); 1208 1209 #undef INSN 1210 1211 #define INSN(NAME, M) \ 1212 void NAME() { \ 1213 branch_reg(0b0010, 1, M, dummy_reg, dummy_reg); \ 1214 } 1215 1216 INSN(retaa, 0); 1217 INSN(retab, 1); 1218 1219 #undef INSN 1220 1221 #define INSN(NAME, OP, M) \ 1222 void NAME(Register rn) { \ 1223 branch_reg(OP, 1, M, rn, dummy_reg); \ 1224 } 1225 
1226 INSN(braaz, 0b0000, 0); 1227 INSN(brabz, 0b0000, 1); 1228 INSN(blraaz, 0b0001, 0); 1229 INSN(blrabz, 0b0001, 1); 1230 1231 #undef INSN 1232 1233 #define INSN(NAME, OP, M) \ 1234 void NAME(Register rn, Register rm) { \ 1235 branch_reg(OP, 1, M, rn, rm); \ 1236 } 1237 1238 INSN(braa, 0b1000, 0); 1239 INSN(brab, 0b1000, 1); 1240 INSN(blraa, 0b1001, 0); 1241 INSN(blrab, 0b1001, 1); 1242 1243 #undef INSN 1244 1245 // Load/store exclusive 1246 enum operand_size { byte, halfword, word, xword }; 1247 1248 void load_store_exclusive(Register Rs, Register Rt1, Register Rt2, 1249 Register Rn, enum operand_size sz, int op, bool ordered) { 1250 starti; 1251 f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21); 1252 rf(Rs, 16), f(ordered, 15), zrf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0); 1253 } 1254 1255 void load_exclusive(Register dst, Register addr, 1256 enum operand_size sz, bool ordered) { 1257 load_store_exclusive(dummy_reg, dst, dummy_reg, addr, 1258 sz, 0b010, ordered); 1259 } 1260 1261 void store_exclusive(Register status, Register new_val, Register addr, 1262 enum operand_size sz, bool ordered) { 1263 load_store_exclusive(status, new_val, dummy_reg, addr, 1264 sz, 0b000, ordered); 1265 } 1266 1267 #define INSN4(NAME, sz, op, o0) /* Four registers */ \ 1268 void NAME(Register Rs, Register Rt1, Register Rt2, Register Rn) { \ 1269 guarantee(Rs != Rn && Rs != Rt1 && Rs != Rt2, "unpredictable instruction"); \ 1270 load_store_exclusive(Rs, Rt1, Rt2, Rn, sz, op, o0); \ 1271 } 1272 1273 #define INSN3(NAME, sz, op, o0) /* Three registers */ \ 1274 void NAME(Register Rs, Register Rt, Register Rn) { \ 1275 guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction"); \ 1276 load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0); \ 1277 } 1278 1279 #define INSN2(NAME, sz, op, o0) /* Two registers */ \ 1280 void NAME(Register Rt, Register Rn) { \ 1281 load_store_exclusive(dummy_reg, Rt, dummy_reg, \ 1282 Rn, sz, op, o0); \ 1283 } 1284 1285 #define INSN_FOO(NAME, sz, op, o0) /* Three 
registers, encoded differently */ \ 1286 void NAME(Register Rt1, Register Rt2, Register Rn) { \ 1287 guarantee(Rt1 != Rt2, "unpredictable instruction"); \ 1288 load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0); \ 1289 } 1290 1291 // bytes 1292 INSN3(stxrb, byte, 0b000, 0); 1293 INSN3(stlxrb, byte, 0b000, 1); 1294 INSN2(ldxrb, byte, 0b010, 0); 1295 INSN2(ldaxrb, byte, 0b010, 1); 1296 INSN2(stlrb, byte, 0b100, 1); 1297 INSN2(ldarb, byte, 0b110, 1); 1298 1299 // halfwords 1300 INSN3(stxrh, halfword, 0b000, 0); 1301 INSN3(stlxrh, halfword, 0b000, 1); 1302 INSN2(ldxrh, halfword, 0b010, 0); 1303 INSN2(ldaxrh, halfword, 0b010, 1); 1304 INSN2(stlrh, halfword, 0b100, 1); 1305 INSN2(ldarh, halfword, 0b110, 1); 1306 1307 // words 1308 INSN3(stxrw, word, 0b000, 0); 1309 INSN3(stlxrw, word, 0b000, 1); 1310 INSN4(stxpw, word, 0b001, 0); 1311 INSN4(stlxpw, word, 0b001, 1); 1312 INSN2(ldxrw, word, 0b010, 0); 1313 INSN2(ldaxrw, word, 0b010, 1); 1314 INSN2(stlrw, word, 0b100, 1); 1315 INSN2(ldarw, word, 0b110, 1); 1316 // pairs of words 1317 INSN_FOO(ldxpw, word, 0b011, 0); 1318 INSN_FOO(ldaxpw, word, 0b011, 1); 1319 1320 // xwords 1321 INSN3(stxr, xword, 0b000, 0); 1322 INSN3(stlxr, xword, 0b000, 1); 1323 INSN4(stxp, xword, 0b001, 0); 1324 INSN4(stlxp, xword, 0b001, 1); 1325 INSN2(ldxr, xword, 0b010, 0); 1326 INSN2(ldaxr, xword, 0b010, 1); 1327 INSN2(stlr, xword, 0b100, 1); 1328 INSN2(ldar, xword, 0b110, 1); 1329 // pairs of xwords 1330 INSN_FOO(ldxp, xword, 0b011, 0); 1331 INSN_FOO(ldaxp, xword, 0b011, 1); 1332 1333 #undef INSN2 1334 #undef INSN3 1335 #undef INSN4 1336 #undef INSN_FOO 1337 1338 // 8.1 Compare and swap extensions 1339 void lse_cas(Register Rs, Register Rt, Register Rn, 1340 enum operand_size sz, bool a, bool r, bool not_pair) { 1341 starti; 1342 if (! not_pair) { // Pair 1343 assert(sz == word || sz == xword, "invalid size"); 1344 /* The size bit is in bit 30, not 31 */ 1345 sz = (operand_size)(sz == word ? 
0b00:0b01); 1346 } 1347 f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21); 1348 zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0); 1349 } 1350 1351 // CAS 1352 #define INSN(NAME, a, r) \ 1353 void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) { \ 1354 assert(Rs != Rn && Rs != Rt, "unpredictable instruction"); \ 1355 lse_cas(Rs, Rt, Rn, sz, a, r, true); \ 1356 } 1357 INSN(cas, false, false) 1358 INSN(casa, true, false) 1359 INSN(casl, false, true) 1360 INSN(casal, true, true) 1361 #undef INSN 1362 1363 // CASP 1364 #define INSN(NAME, a, r) \ 1365 void NAME(operand_size sz, Register Rs, Register Rs1, \ 1366 Register Rt, Register Rt1, Register Rn) { \ 1367 assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 && \ 1368 Rs->successor() == Rs1 && Rt->successor() == Rt1 && \ 1369 Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers"); \ 1370 lse_cas(Rs, Rt, Rn, sz, a, r, false); \ 1371 } 1372 INSN(casp, false, false) 1373 INSN(caspa, true, false) 1374 INSN(caspl, false, true) 1375 INSN(caspal, true, true) 1376 #undef INSN 1377 1378 // 8.1 Atomic operations 1379 void lse_atomic(Register Rs, Register Rt, Register Rn, 1380 enum operand_size sz, int op1, int op2, bool a, bool r) { 1381 starti; 1382 f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21); 1383 zrf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0); 1384 } 1385 1386 #define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2) \ 1387 void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) { \ 1388 lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false); \ 1389 } \ 1390 void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \ 1391 lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false); \ 1392 } \ 1393 void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \ 1394 lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true); \ 1395 } \ 1396 void NAME_AL(operand_size sz, Register Rs, Register Rt, 
Register Rn) {\ 1397 lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true); \ 1398 } 1399 INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000); 1400 INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001); 1401 INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010); 1402 INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011); 1403 INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100); 1404 INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101); 1405 INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110); 1406 INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111); 1407 INSN(swp, swpa, swpl, swpal, 1, 0b000); 1408 #undef INSN 1409 1410 // Load register (literal) 1411 #define INSN(NAME, opc, V) \ 1412 void NAME(Register Rt, address dest) { \ 1413 int64_t offset = (dest - pc()) >> 2; \ 1414 starti; \ 1415 f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \ 1416 sf(offset, 23, 5); \ 1417 rf(Rt, 0); \ 1418 } \ 1419 void NAME(Register Rt, address dest, relocInfo::relocType rtype) { \ 1420 InstructionMark im(this); \ 1421 guarantee(rtype == relocInfo::internal_word_type, \ 1422 "only internal_word_type relocs make sense here"); \ 1423 code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \ 1424 NAME(Rt, dest); \ 1425 } \ 1426 void NAME(Register Rt, Label &L) { \ 1427 wrap_label(Rt, L, &Assembler::NAME); \ 1428 } 1429 1430 INSN(ldrw, 0b00, 0); 1431 INSN(ldr, 0b01, 0); 1432 INSN(ldrsw, 0b10, 0); 1433 1434 #undef INSN 1435 1436 #define INSN(NAME, opc, V) \ 1437 void NAME(FloatRegister Rt, address dest) { \ 1438 int64_t offset = (dest - pc()) >> 2; \ 1439 starti; \ 1440 f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \ 1441 sf(offset, 23, 5); \ 1442 rf(as_Register(Rt), 0); \ 1443 } 1444 1445 INSN(ldrs, 0b00, 1); 1446 INSN(ldrd, 0b01, 1); 1447 INSN(ldrq, 0b10, 1); 1448 1449 #undef INSN 1450 1451 #define INSN(NAME, size, opc) \ 1452 void NAME(FloatRegister Rt, Register Rn) { \ 1453 starti; \ 1454 f(size, 31, 30), f(0b111100, 29, 24), f(opc, 23, 22), f(0, 21); \ 1455 f(0, 20, 12), f(0b01, 
11, 10); \ 1456 rf(Rn, 5), rf(as_Register(Rt), 0); \ 1457 } 1458 1459 INSN(ldrs, 0b10, 0b01); 1460 INSN(ldrd, 0b11, 0b01); 1461 INSN(ldrq, 0b00, 0b11); 1462 1463 #undef INSN 1464 1465 1466 #define INSN(NAME, opc, V) \ 1467 void NAME(address dest, prfop op = PLDL1KEEP) { \ 1468 int64_t offset = (dest - pc()) >> 2; \ 1469 starti; \ 1470 f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \ 1471 sf(offset, 23, 5); \ 1472 f(op, 4, 0); \ 1473 } \ 1474 void NAME(Label &L, prfop op = PLDL1KEEP) { \ 1475 wrap_label(L, op, &Assembler::NAME); \ 1476 } 1477 1478 INSN(prfm, 0b11, 0); 1479 1480 #undef INSN 1481 1482 // Load/store 1483 void ld_st1(int opc, int p1, int V, int L, 1484 Register Rt1, Register Rt2, Address adr, bool no_allocate) { 1485 starti; 1486 f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22); 1487 zrf(Rt2, 10), zrf(Rt1, 0); 1488 if (no_allocate) { 1489 adr.encode_nontemporal_pair(¤t_insn); 1490 } else { 1491 adr.encode_pair(¤t_insn); 1492 } 1493 } 1494 1495 // Load/store register pair (offset) 1496 #define INSN(NAME, size, p1, V, L, no_allocate) \ 1497 void NAME(Register Rt1, Register Rt2, Address adr) { \ 1498 ld_st1(size, p1, V, L, Rt1, Rt2, adr, no_allocate); \ 1499 } 1500 1501 INSN(stpw, 0b00, 0b101, 0, 0, false); 1502 INSN(ldpw, 0b00, 0b101, 0, 1, false); 1503 INSN(ldpsw, 0b01, 0b101, 0, 1, false); 1504 INSN(stp, 0b10, 0b101, 0, 0, false); 1505 INSN(ldp, 0b10, 0b101, 0, 1, false); 1506 1507 // Load/store no-allocate pair (offset) 1508 INSN(stnpw, 0b00, 0b101, 0, 0, true); 1509 INSN(ldnpw, 0b00, 0b101, 0, 1, true); 1510 INSN(stnp, 0b10, 0b101, 0, 0, true); 1511 INSN(ldnp, 0b10, 0b101, 0, 1, true); 1512 1513 #undef INSN 1514 1515 #define INSN(NAME, size, p1, V, L, no_allocate) \ 1516 void NAME(FloatRegister Rt1, FloatRegister Rt2, Address adr) { \ 1517 ld_st1(size, p1, V, L, \ 1518 as_Register(Rt1), as_Register(Rt2), adr, no_allocate); \ 1519 } 1520 1521 INSN(stps, 0b00, 0b101, 1, 0, false); 1522 INSN(ldps, 0b00, 0b101, 1, 1, false); 1523 INSN(stpd, 
0b01, 0b101, 1, 0, false); 1524 INSN(ldpd, 0b01, 0b101, 1, 1, false); 1525 INSN(stpq, 0b10, 0b101, 1, 0, false); 1526 INSN(ldpq, 0b10, 0b101, 1, 1, false); 1527 1528 #undef INSN 1529 1530 // Load/store register (all modes) 1531 void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) { 1532 starti; 1533 1534 f(V, 26); // general reg? 1535 zrf(Rt, 0); 1536 1537 // Encoding for literal loads is done here (rather than pushed 1538 // down into Address::encode) because the encoding of this 1539 // instruction is too different from all of the other forms to 1540 // make it worth sharing. 1541 if (adr.getMode() == Address::literal) { 1542 assert(size == 0b10 || size == 0b11, "bad operand size in ldr"); 1543 assert(op == 0b01, "literal form can only be used with loads"); 1544 f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24); 1545 int64_t offset = (adr.target() - pc()) >> 2; 1546 sf(offset, 23, 5); 1547 code_section()->relocate(pc(), adr.rspec()); 1548 return; 1549 } 1550 1551 f(size, 31, 30); 1552 f(op, 23, 22); // str 1553 adr.encode(¤t_insn); 1554 } 1555 1556 #define INSN(NAME, size, op) \ 1557 void NAME(Register Rt, const Address &adr) { \ 1558 ld_st2(Rt, adr, size, op); \ 1559 } \ 1560 1561 INSN(str, 0b11, 0b00); 1562 INSN(strw, 0b10, 0b00); 1563 INSN(strb, 0b00, 0b00); 1564 INSN(strh, 0b01, 0b00); 1565 1566 INSN(ldr, 0b11, 0b01); 1567 INSN(ldrw, 0b10, 0b01); 1568 INSN(ldrb, 0b00, 0b01); 1569 INSN(ldrh, 0b01, 0b01); 1570 1571 INSN(ldrsb, 0b00, 0b10); 1572 INSN(ldrsbw, 0b00, 0b11); 1573 INSN(ldrsh, 0b01, 0b10); 1574 INSN(ldrshw, 0b01, 0b11); 1575 INSN(ldrsw, 0b10, 0b10); 1576 1577 #undef INSN 1578 1579 #define INSN(NAME, size, op) \ 1580 void NAME(FloatRegister Rt, const Address &adr) { \ 1581 ld_st2(as_Register(Rt), adr, size, op, 1); \ 1582 } 1583 1584 INSN(strd, 0b11, 0b00); 1585 INSN(strs, 0b10, 0b00); 1586 INSN(ldrd, 0b11, 0b01); 1587 INSN(ldrs, 0b10, 0b01); 1588 INSN(strq, 0b00, 0b10); 1589 INSN(ldrq, 0x00, 0b11); 1590 1591 #undef INSN 
  /* SIMD extensions
   *
   * We just use FloatRegister in the following. They are exactly the same
   * as SIMD registers.
   */
public:

  // Vector arrangements: element width x lane count (e.g. T8B = 8 x 8-bit).
  enum SIMD_Arrangement {
    T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D, T1Q, INVALID_ARRANGEMENT
  };

  // Scalar SIMD/FP register width: Byte/Half/Single/Double/Quad.
  enum SIMD_RegVariant {
    B, H, S, D, Q, INVALID
  };

private:

  // Lookup tables indexed by element size in bytes (second index of
  // the arrangement table is the Q bit).
  static SIMD_Arrangement _esize2arrangement_table[9][2];
  static SIMD_RegVariant _esize2regvariant[9];

public:

  static SIMD_Arrangement esize2arrangement(unsigned esize, bool isQ);
  static SIMD_RegVariant elemType_to_regVariant(BasicType bt);
  static SIMD_RegVariant elemBytes_to_regVariant(unsigned esize);
  // Return the corresponding bits for different SIMD_RegVariant value.
  static unsigned regVariant_to_elemBits(SIMD_RegVariant T);

  enum shift_kind { LSL, LSR, ASR, ROR };

  // Common fields for the "shifted register" data-processing formats;
  // the caller has already placed the register fields and bit 21.
  void op_shifted_reg(Instruction_aarch64 &current_insn, unsigned decode,
                      enum shift_kind kind, unsigned shift,
                      unsigned size, unsigned op) {
    f(size, 31);
    f(op, 30, 29);
    f(decode, 28, 24);
    f(shift, 15, 10);
    f(kind, 23, 22);
  }

  // Logical (shifted register)
#define INSN(NAME, size, op, N)                                   \
  void NAME(Register Rd, Register Rn, Register Rm,                \
            enum shift_kind kind = LSL, unsigned shift = 0) {     \
    starti;                                                       \
    guarantee(size == 1 || shift < 32, "incorrect shift");        \
    f(N, 21);                                                     \
    zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);                          \
    op_shifted_reg(current_insn, 0b01010, kind, shift, size, op); \
  }

  INSN(andr, 1, 0b00, 0);
  INSN(orr, 1, 0b01, 0);
  INSN(eor, 1, 0b10, 0);
  INSN(ands, 1, 0b11, 0);
  INSN(andw, 0, 0b00, 0);
  INSN(orrw, 0, 0b01, 0);
  INSN(eorw, 0, 0b10, 0);
  INSN(andsw, 0, 0b11, 0);

#undef INSN

  // Logical (shifted register), inverted-Rm forms (N == 1).
#define INSN(NAME, size, op, N)                                   \
  void NAME(Register Rd, Register Rn, Register Rm,                \
            enum shift_kind kind = LSL, unsigned shift = 0) {     \
    starti;                                                       \
    f(N, 21);                                                     \
    zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);                          \
    op_shifted_reg(current_insn, 0b01010, kind, shift, size, op); \
  }                                                               \
                                                                  \
  /* These instructions have no immediate form. Provide an overload so \
     that if anyone does try to use an immediate operand -- this has \
     happened! -- we'll get a compile-time error. */              \
  void NAME(Register Rd, Register Rn, unsigned imm,               \
            enum shift_kind kind = LSL, unsigned shift = 0) {     \
    assert(false, " can't be used with immediate operand");       \
  }

  INSN(bic, 1, 0b00, 1);
  INSN(orn, 1, 0b01, 1);
  INSN(eon, 1, 0b10, 1);
  INSN(bics, 1, 0b11, 1);
  INSN(bicw, 0, 0b00, 1);
  INSN(ornw, 0, 0b01, 1);
  INSN(eonw, 0, 0b10, 1);
  INSN(bicsw, 0, 0b11, 1);

#undef INSN

#ifdef _WIN64
// In MSVC, `mvn` is defined as a macro and it affects compilation
#undef mvn
#endif

  // Aliases for short forms of orn
  void mvn(Register Rd, Register Rm,
           enum shift_kind kind = LSL, unsigned shift = 0) {
    orn(Rd, zr, Rm, kind, shift);
  }

  void mvnw(Register Rd, Register Rm,
            enum shift_kind kind = LSL, unsigned shift = 0) {
    ornw(Rd, zr, Rm, kind, shift);
  }

  // Add/subtract (shifted register)
  // ROR is not a valid shift for add/sub, hence the assert.
#define INSN(NAME, size, op)                                      \
  void NAME(Register Rd, Register Rn, Register Rm,                \
            enum shift_kind kind, unsigned shift = 0) {           \
    starti;                                                       \
    f(0, 21);                                                     \
    assert_cond(kind != ROR);                                     \
    guarantee(size == 1 || shift < 32, "incorrect shift");        \
    zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16);                          \
    op_shifted_reg(current_insn, 0b01011, kind, shift, size, op); \
  }

  INSN(add, 1, 0b000);
  INSN(sub, 1, 0b10);
  INSN(addw, 0, 0b000);
  INSN(subw, 0, 0b10);

  INSN(adds, 1, 0b001);
  INSN(subs, 1, 0b11);
  INSN(addsw, 0, 0b001);
  INSN(subsw, 0, 0b11);

#undef INSN

  // Add/subtract (extended register)
  // Non-flag-setting forms: Rd/Rn may be sp (srf).
#define INSN(NAME, op)                                                               \
  void NAME(Register Rd, Register Rn, Register Rm,                                   \
           ext::operation option, int amount = 0) {                                  \
    starti;                                                                          \
    zrf(Rm, 16), srf(Rn, 5), srf(Rd, 0);                                             \
    add_sub_extended_reg(current_insn, op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \
  }

  // Shared field layout for the extended-register forms; the caller
  // has already placed the register fields.
  void add_sub_extended_reg(Instruction_aarch64 &current_insn, unsigned op, unsigned decode,
                            Register Rd, Register Rn, Register Rm,
                            unsigned opt, ext::operation option, unsigned imm) {
    guarantee(imm <= 4, "shift amount must be <= 4");
    f(op, 31, 29), f(decode, 28, 24), f(opt, 23, 22), f(1, 21);
    f(option, 15, 13), f(imm, 12, 10);
  }

  INSN(addw, 0b000);
  INSN(subw, 0b010);
  INSN(add, 0b100);
  INSN(sub, 0b110);

#undef INSN

  // Flag-setting forms: Rd is zr-encoded (cannot be sp).
#define INSN(NAME, op)                                                               \
  void NAME(Register Rd, Register Rn, Register Rm,                                   \
           ext::operation option, int amount = 0) {                                  \
    starti;                                                                          \
    zrf(Rm, 16), srf(Rn, 5), zrf(Rd, 0);                                             \
    add_sub_extended_reg(current_insn, op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \
  }

  INSN(addsw, 0b001);
  INSN(subsw, 0b011);
  INSN(adds, 0b101);
  INSN(subs, 0b111);

#undef INSN

  // Aliases for short forms of add and sub
  // If sp is involved the extended-register form must be used,
  // because sp is not a valid operand of the shifted-register form.
#define INSN(NAME)                                   \
  void NAME(Register Rd, Register Rn, Register Rm) { \
    if (Rd == sp || Rn == sp)                        \
      NAME(Rd, Rn, Rm, ext::uxtx);                   \
    else                                             \
      NAME(Rd, Rn, Rm, LSL);                         \
  }

  INSN(addw);
  INSN(subw);
  INSN(add);
  INSN(sub);

  INSN(addsw);
  INSN(subsw);
  INSN(adds);
  INSN(subs);

#undef INSN

  // Add/subtract (with carry)
  void add_sub_carry(unsigned op, Register Rd, Register Rn, Register Rm) {
    starti;
    f(op, 31, 29);
    f(0b11010000, 28, 21);
    f(0b000000, 15, 10);
    zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);
  }

#define INSN(NAME, op)                               \
  void NAME(Register Rd, Register Rn, Register Rm) { \
    add_sub_carry(op, Rd, Rn, Rm);                   \
  }

  INSN(adcw, 0b000);
  INSN(adcsw, 0b001);
  INSN(sbcw, 0b010);
  INSN(sbcsw, 0b011);
  INSN(adc, 0b100);
  INSN(adcs, 0b101);
  INSN(sbc, 0b110);
  INSN(sbcs, 0b111);

#undef INSN

  // Conditional compare (both kinds)
  // imm5 is either the second register's encoding (register form,
  // o1 == 0) or a 5-bit immediate (immediate form, o1 == 1).
  void conditional_compare(unsigned op, int o1, int o2, int o3,
                           Register Rn, unsigned imm5, unsigned nzcv,
                           unsigned cond) {
    starti;
    f(op, 31, 29);
    f(0b11010010, 28, 21);
    f(cond, 15, 12);
    f(o1, 11);
    f(o2, 10);
    f(o3, 4);
    f(nzcv, 3, 0);
    f(imm5, 20, 16), zrf(Rn, 5);
  }

#define INSN(NAME, op)                                              \
  void NAME(Register Rn, Register Rm, int imm, Condition cond) {    \
    int regNumber = (Rm == zr ? 31 : Rm->encoding());               \
    conditional_compare(op, 0, 0, 0, Rn, regNumber, imm, cond);     \
  }                                                                 \
                                                                    \
  void NAME(Register Rn, int imm5, int imm, Condition cond) {       \
    conditional_compare(op, 1, 0, 0, Rn, imm5, imm, cond);          \
  }

  INSN(ccmnw, 0b001);
  INSN(ccmpw, 0b011);
  INSN(ccmn, 0b101);
  INSN(ccmp, 0b111);

#undef INSN

  // Conditional select
  void conditional_select(unsigned op, unsigned op2,
                          Register Rd, Register Rn, Register Rm,
                          unsigned cond) {
    starti;
    f(op, 31, 29);
    f(0b11010100, 28, 21);
    f(cond, 15, 12);
    f(op2, 11, 10);
    zrf(Rm, 16), zrf(Rn, 5), rf(Rd, 0);
  }

#define INSN(NAME, op, op2)                                          \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond) { \
    conditional_select(op, op2, Rd, Rn, Rm, cond);                   \
  }

  INSN(cselw, 0b000, 0b00);
  INSN(csincw, 0b000, 0b01);
  INSN(csinvw, 0b010, 0b00);
  INSN(csnegw, 0b010, 0b01);
  INSN(csel, 0b100, 0b00);
  INSN(csinc, 0b100, 0b01);
  INSN(csinv, 0b110, 0b00);
  INSN(csneg, 0b110, 0b01);

#undef INSN

  // Data processing
  // Common fields for the 1- and 2-source integer forms.
  void data_processing(Instruction_aarch64 &current_insn, unsigned op29, unsigned opcode,
                       Register Rd, Register Rn) {
    f(op29, 31, 29), f(0b11010110, 28, 21);
    f(opcode, 15, 10);
    rf(Rn, 5), rf(Rd, 0);
  }

  // (1 source)
#define INSN(NAME, op29, opcode2, opcode)               \
  void NAME(Register Rd, Register Rn) {                 \
    starti;                                             \
    f(opcode2, 20, 16);                                 \
    data_processing(current_insn, op29, opcode, Rd, Rn); \
  }

  INSN(rbitw,  0b010, 0b00000, 0b00000);
  INSN(rev16w, 0b010, 0b00000, 0b00001);
  INSN(revw,   0b010, 0b00000, 0b00010);
  INSN(clzw,   0b010, 0b00000, 0b00100);
  INSN(clsw,   0b010, 0b00000, 0b00101);

  INSN(rbit,   0b110, 0b00000, 0b00000);
  INSN(rev16,  0b110, 0b00000, 0b00001);
  INSN(rev32,  0b110, 0b00000, 0b00010);
  INSN(rev,    0b110, 0b00000, 0b00011);
  INSN(clz,    0b110, 0b00000, 0b00100);
  INSN(cls,    0b110, 0b00000, 0b00101);

  // PAC instructions
  INSN(pacia,  0b110, 0b00001, 0b00000);
  INSN(pacib,  0b110, 0b00001, 0b00001);
  INSN(pacda,  0b110, 0b00001, 0b00010);
  INSN(pacdb,  0b110, 0b00001, 0b00011);
  INSN(autia,  0b110, 0b00001, 0b00100);
  INSN(autib,  0b110, 0b00001, 0b00101);
  INSN(autda,  0b110, 0b00001, 0b00110);
  INSN(autdb,  0b110, 0b00001, 0b00111);

#undef INSN

#define INSN(NAME, op29, opcode2, opcode)                      \
  void NAME(Register Rd) {                                     \
    starti;                                                    \
    f(opcode2, 20, 16);                                        \
    data_processing(current_insn, op29, opcode, Rd, dummy_reg); \
  }

  // PAC instructions (with zero modifier)
  INSN(paciza, 0b110, 0b00001, 0b01000);
  INSN(pacizb, 0b110, 0b00001, 0b01001);
  INSN(pacdza, 0b110, 0b00001, 0b01010);
  INSN(pacdzb, 0b110, 0b00001, 0b01011);
  INSN(autiza, 0b110, 0b00001, 0b01100);
  INSN(autizb, 0b110, 0b00001, 0b01101);
  INSN(autdza, 0b110, 0b00001, 0b01110);
  INSN(autdzb, 0b110, 0b00001, 0b01111);
  INSN(xpaci,  0b110, 0b00001, 0b10000);
  INSN(xpacd,  0b110, 0b00001, 0b10001);

#undef INSN

  // Data-processing (2 source)
#define INSN(NAME, op29, opcode)                        \
  void NAME(Register Rd, Register Rn, Register Rm) {    \
    starti;                                             \
    rf(Rm, 16);                                         \
    data_processing(current_insn, op29, opcode, Rd, Rn); \
  }

  INSN(udivw, 0b000, 0b000010);
  INSN(sdivw, 0b000, 0b000011);
  INSN(lslvw, 0b000, 0b001000);
  INSN(lsrvw, 0b000, 0b001001);
  INSN(asrvw, 0b000, 0b001010);
  INSN(rorvw, 0b000, 0b001011);

  INSN(udiv, 0b100, 0b000010);
  INSN(sdiv, 0b100, 0b000011);
  INSN(lslv, 0b100, 0b001000);
  INSN(lsrv, 0b100, 0b001001);
  INSN(asrv, 0b100, 0b001010);
  INSN(rorv, 0b100, 0b001011);

#undef INSN

  // Data-processing (3 source)
  void data_processing(unsigned op54, unsigned op31, unsigned o0,
                       Register Rd, Register Rn, Register Rm,
                       Register Ra) {
    starti;
    f(op54, 31, 29), f(0b11011, 28, 24);
    f(op31, 23, 21), f(o0, 15);
    zrf(Rm, 16), zrf(Ra, 10), zrf(Rn, 5), zrf(Rd, 0);
  }

#define INSN(NAME, op54, op31, o0)                                \
  void NAME(Register Rd, Register Rn, Register Rm, Register Ra) { \
    data_processing(op54, op31, o0, Rd, Rn, Rm, Ra);              \
  }

  INSN(maddw, 0b000, 0b000, 0);
  INSN(msubw, 0b000, 0b000, 1);
  INSN(madd, 0b100, 0b000, 0);
  INSN(msub, 0b100, 0b000, 1);
  INSN(smaddl, 0b100, 0b001, 0);
  INSN(smsubl, 0b100, 0b001, 1);
  INSN(umaddl, 0b100, 0b101, 0);
  INSN(umsubl, 0b100, 0b101, 1);

#undef INSN

  // High-half multiplies: Ra is hard-wired to the zr encoding (31).
#define INSN(NAME, op54, op31, o0)                           \
  void NAME(Register Rd, Register Rn, Register Rm) {         \
    data_processing(op54, op31, o0, Rd, Rn, Rm, as_Register(31)); \
  }

  INSN(smulh, 0b100, 0b010, 0);
  INSN(umulh, 0b100, 0b110, 0);

#undef INSN

  // Floating-point data-processing (1 source)
  void data_processing(unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(0b000, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 20, 15), f(0b10000, 14, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, type, opcode)                  \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    data_processing(type, opcode, Vd, Vn);        \
  }

  INSN(fmovs, 0b00, 0b000000);
  INSN(fabss, 0b00, 0b000001);
  INSN(fnegs, 0b00, 0b000010);
  INSN(fsqrts, 0b00, 0b000011);
  INSN(fcvts, 0b00, 0b000101);   // Single-precision to double-precision
  INSN(fcvths, 0b11, 0b000100);  // Half-precision to single-precision
  INSN(fcvtsh, 0b00, 0b000111);  // Single-precision to half-precision

  INSN(fmovd, 0b01, 0b000000);
  INSN(fabsd, 0b01, 0b000001);
  INSN(fnegd, 0b01, 0b000010);
  INSN(fsqrtd, 0b01, 0b000011);
  INSN(fcvtd, 0b01, 0b000100);   // Double-precision to single-precision

private:
  // FCVTL (extend) / FCVTN (narrow): vector FP width conversion.
  // The assert checks Ta/Tb are adjacent element widths.
  void _fcvt_narrow_extend(FloatRegister Vd, SIMD_Arrangement Ta,
                           FloatRegister Vn, SIMD_Arrangement Tb, bool do_extend) {
    assert((do_extend && (Tb >> 1) + 1 == (Ta >> 1))
           || (!do_extend && (Ta >> 1) + 1 == (Tb >> 1)), "Incompatible arrangement");
    starti;
    int op30 = (do_extend ? Tb : Ta) & 1;
    int op22 = ((do_extend ? Ta : Tb) >> 1) & 1;
    f(0, 31), f(op30, 30), f(0b0011100, 29, 23), f(op22, 22);
    f(0b100001011, 21, 13), f(do_extend ? 1 : 0, 12), f(0b10, 11, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

public:
  void fcvtl(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    assert(Tb == T4H || Tb == T8H || Tb == T2S || Tb == T4S, "invalid arrangement");
    _fcvt_narrow_extend(Vd, Ta, Vn, Tb, true);
  }

  void fcvtn(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    assert(Ta == T4H || Ta == T8H || Ta == T2S || Ta == T4S, "invalid arrangement");
    _fcvt_narrow_extend(Vd, Ta, Vn, Tb, false);
  }

#undef INSN

  // Floating-point data-processing (2 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 15, 10);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)                              \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) { \
    data_processing(op31, type, opcode, Vd, Vn, Vm);                \
  }

  INSN(fabds,  0b011, 0b10, 0b110101);
  INSN(fmuls,  0b000, 0b00, 0b000010);
  INSN(fdivs,  0b000, 0b00, 0b000110);
  INSN(fadds,  0b000, 0b00, 0b001010);
  INSN(fsubs,  0b000, 0b00, 0b001110);
  INSN(fmaxs,  0b000, 0b00, 0b010010);
  INSN(fmins,  0b000, 0b00, 0b010110);
  INSN(fnmuls, 0b000, 0b00, 0b100010);

  INSN(fabdd,  0b011, 0b11, 0b110101);
  INSN(fmuld,  0b000, 0b01, 0b000010);
  INSN(fdivd,  0b000, 0b01, 0b000110);
  INSN(faddd,  0b000, 0b01, 0b001010);
  INSN(fsubd,  0b000, 0b01, 0b001110);
  INSN(fmaxd,  0b000, 0b01, 0b010010);
  INSN(fmind,  0b000, 0b01, 0b010110);
  INSN(fnmuld, 0b000, 0b01, 0b100010);

#undef INSN

  // Floating-point data-processing (3 source)
  void data_processing(unsigned op31, unsigned type, unsigned o1, unsigned o0,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,
                       FloatRegister Va) {
    starti;
    f(op31, 31, 29);
    f(0b11111, 28, 24);
    f(type, 23, 22), f(o1, 21), f(o0, 15);
    rf(Vm, 16), rf(Va, 10), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, o1, o0)                              \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,   \
            FloatRegister Va) {                                     \
    data_processing(op31, type, o1, o0, Vd, Vn, Vm, Va);            \
  }

  INSN(fmadds, 0b000, 0b00, 0, 0);
  INSN(fmsubs, 0b000, 0b00, 0, 1);
  INSN(fnmadds, 0b000, 0b00, 1, 0);
  INSN(fnmsubs, 0b000, 0b00, 1, 1);

  INSN(fmaddd, 0b000, 0b01, 0, 0);
  INSN(fmsubd, 0b000, 0b01, 0, 1);
  INSN(fnmaddd, 0b000, 0b01, 1, 0);
  // NOTE(review): name lacks the usual 'd' suffix (cf. fnmsubs);
  // kept as-is because existing callers use this spelling.
  INSN(fnmsub, 0b000, 0b01, 1, 1);

#undef INSN

  // Floating-point conditional select
  void fp_conditional_select(unsigned op31, unsigned type,
                             unsigned op1, unsigned op2,
                             Condition cond, FloatRegister Vd,
                             FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22);
    f(op1, 21, 21);
    f(op2, 11, 10);
    f(cond, 15, 12);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, op1, op2)                          \
  void NAME(FloatRegister Vd, FloatRegister Vn,                   \
            FloatRegister Vm, Condition cond) {                   \
    fp_conditional_select(op31, type, op1, op2, cond, Vd, Vn, Vm); \
  }

  INSN(fcsels, 0b000, 0b00, 0b1, 0b11);
  INSN(fcseld, 0b000, 0b01, 0b1, 0b11);

#undef INSN

  // Conversion between floating-point and integer
  // sflag: 0 = 32-bit GPR, 1 = 64-bit GPR; ftype is the FP width.
  void float_int_convert(unsigned sflag, unsigned ftype,
                         unsigned rmode, unsigned opcode,
                         Register Rd, Register Rn) {
    starti;
    f(sflag, 31);
    f(0b00, 30, 29);
    f(0b11110, 28, 24);
    f(ftype, 23, 22), f(1, 21), f(rmode, 20, 19);
    f(opcode, 18, 16), f(0b000000, 15, 10);
    zrf(Rn, 5), zrf(Rd, 0);
  }

  // FP -> integer register.
#define INSN(NAME, sflag, ftype, rmode, opcode)                        \
  void NAME(Register Rd, FloatRegister Vn) {                           \
    float_int_convert(sflag, ftype, rmode, opcode, Rd, as_Register(Vn)); \
  }

  INSN(fcvtzsw, 0b0, 0b00, 0b11, 0b000);
  INSN(fcvtzs,  0b1, 0b00, 0b11, 0b000);
  INSN(fcvtzdw, 0b0, 0b01, 0b11, 0b000);
  INSN(fcvtzd,  0b1, 0b01, 0b11, 0b000);

  // RoundToNearestTiesAway
  INSN(fcvtassw, 0b0, 0b00, 0b00, 0b100);  // float -> signed word
  INSN(fcvtasd,  0b1, 0b01, 0b00, 0b100);  // double -> signed xword

  // RoundTowardsNegative
  INSN(fcvtmssw, 0b0, 0b00, 0b10, 0b000);  // float -> signed word
  INSN(fcvtmsd,  0b1, 0b01, 0b10, 0b000);  // double -> signed xword

  INSN(fmovs, 0b0, 0b00, 0b00, 0b110);
  INSN(fmovd, 0b1, 0b01, 0b00, 0b110);

  INSN(fmovhid, 0b1, 0b10, 0b01, 0b110);

#undef INSN

  // Integer register -> FP.
#define INSN(NAME, sflag, type, rmode, opcode)                          \
  void NAME(FloatRegister Vd, Register Rn) {                            \
    float_int_convert(sflag, type, rmode, opcode, as_Register(Vd), Rn); \
  }

  INSN(fmovs, 0b0, 0b00, 0b00, 0b111);
  INSN(fmovd, 0b1, 0b01, 0b00, 0b111);

  INSN(scvtfws, 0b0, 0b00, 0b00, 0b010);
  INSN(scvtfs,  0b1, 0b00, 0b00, 0b010);
  INSN(scvtfwd, 0b0, 0b01, 0b00, 0b010);
  INSN(scvtfd,  0b1, 0b01, 0b00, 0b010);

  // INSN(fmovhid, 0b100, 0b10, 0b01, 0b111);

#undef INSN

private:
  // Vector integer -> FP conversion (SCVTF/UCVTF, vector form).
  void _xcvtf_vector_integer(bool is_unsigned, SIMD_Arrangement T,
                             FloatRegister Rd, FloatRegister Rn) {
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");
    starti;
    f(0, 31), f(T & 1, 30), f(is_unsigned ? 1 : 0, 29);
    f(0b011100, 28, 23), f((T >> 1) & 1, 22), f(0b100001110110, 21, 10);
    rf(Rn, 5), rf(Rd, 0);
  }

public:

  void scvtfv(SIMD_Arrangement T, FloatRegister Rd, FloatRegister Rn) {
    _xcvtf_vector_integer(/* is_unsigned */ false, T, Rd, Rn);
  }

  // Floating-point compare
  // Vm defaults to v0; the with-zero forms (op2 bit 3 set) ignore it.
  void float_compare(unsigned op31, unsigned type,
                     unsigned op, unsigned op2,
                     FloatRegister Vn, FloatRegister Vm = as_FloatRegister(0)) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21);
    f(op, 15, 14), f(0b1000, 13, 10), f(op2, 4, 0);
    rf(Vn, 5), rf(Vm, 16);
  }


#define INSN(NAME, op31, type, op, op2)           \
  void NAME(FloatRegister Vn, FloatRegister Vm) { \
    float_compare(op31, type, op, op2, Vn, Vm);   \
  }

  // Compare-with-zero form; only 0.0 is a valid immediate.
#define INSN1(NAME, op31, type, op, op2)     \
  void NAME(FloatRegister Vn, double d) {    \
    assert_cond(d == 0.0);                   \
    float_compare(op31, type, op, op2, Vn);  \
  }

  INSN(fcmps, 0b000, 0b00, 0b00, 0b00000);
  INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000);
  // INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000);
  // INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000);

  INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000);
  INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000);
  // INSN(fcmped, 0b000, 0b01, 0b00, 0b10000);
  // INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000);

#undef INSN
#undef INSN1

  // Floating-point compare. 3-registers versions (scalar).
  // Floating-point absolute compare (FACGE/FACGT), 3-register scalar
  // forms; sz selects single/double, e selects GE vs GT.
#define INSN(NAME, sz, e)                                               \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {     \
    starti;                                                             \
    f(0b01111110, 31, 24), f(e, 23), f(sz, 22), f(1, 21), rf(Vm, 16);   \
    f(0b111011, 15, 10), rf(Vn, 5), rf(Vd, 0);                          \
  }                                                                     \

  INSN(facged, 1, 0); // facge-double
  INSN(facges, 0, 0); // facge-single
  INSN(facgtd, 1, 1); // facgt-double
  INSN(facgts, 0, 1); // facgt-single

#undef INSN

  // Floating-point Move (immediate)
private:
  // Packs a floating-point value into the 8-bit imm8 field used by
  // fmov (immediate); defined out of line.
  unsigned pack(double value);

  void fmov_imm(FloatRegister Vn, double value, unsigned size) {
    starti;
    f(0b00011110, 31, 24), f(size, 23, 22), f(1, 21);
    f(pack(value), 20, 13), f(0b10000000, 12, 5);
    rf(Vn, 0);
  }

public:

  // Zero is not encodable as an fmov immediate, so fall back to movi
  // to clear the register.
  void fmovs(FloatRegister Vn, double value) {
    if (value)
      fmov_imm(Vn, value, 0b00);
    else
      movi(Vn, T2S, 0);
  }
  void fmovd(FloatRegister Vn, double value) {
    if (value)
      fmov_imm(Vn, value, 0b01);
    else
      movi(Vn, T1D, 0);
  }

  // Floating-point data-processing (1 source)

  // Floating-point rounding
  //   type: half-precision = 11
  //         single         = 00
  //         double         = 01
  //   rmode: A = Away     = 100
  //          I = current  = 111
  //          M = MinusInf = 010
  //          N = eveN     = 000
  //          P = PlusInf  = 001
  //          X = eXact    = 110
  //          Z = Zero     = 011
  void float_round(unsigned type, unsigned rmode, FloatRegister Rd, FloatRegister Rn) {
    starti;
    f(0b00011110, 31, 24);
    f(type, 23, 22);
    f(0b1001, 21, 18);
    f(rmode, 17, 15);
    f(0b10000, 14, 10);
    rf(Rn, 5), rf(Rd, 0);
  }
#define INSN(NAME, type, rmode)                   \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    float_round(type, rmode, Vd, Vn);             \
  }

public:
  // Half-precision (type = 0b11)
  INSN(frintah, 0b11, 0b100);
  INSN(frintih, 0b11, 0b111);
  INSN(frintmh, 0b11, 0b010);
  INSN(frintnh, 0b11, 0b000);
  INSN(frintph, 0b11, 0b001);
  INSN(frintxh, 0b11, 0b110);
  INSN(frintzh, 0b11, 0b011);

  // Single precision (type = 0b00)
  INSN(frintas, 0b00, 0b100);
  INSN(frintis, 0b00, 0b111);
  INSN(frintms, 0b00, 0b010);
  INSN(frintns, 0b00, 0b000);
  INSN(frintps, 0b00, 0b001);
  INSN(frintxs, 0b00, 0b110);
  INSN(frintzs, 0b00, 0b011);

  // Double precision (type = 0b01)
  INSN(frintad, 0b01, 0b100);
  INSN(frintid, 0b01, 0b111);
  INSN(frintmd, 0b01, 0b010);
  INSN(frintnd, 0b01, 0b000);
  INSN(frintpd, 0b01, 0b001);
  INSN(frintxd, 0b01, 0b110);
  INSN(frintzd, 0b01, 0b011);
#undef INSN

private:
  // Byte size of each SIMD_Arrangement; indexed by T in the post-index
  // immediate check below.  Defined out of line.
  static short SIMD_Size_in_bytes[];

public:
  // Load/store a single SIMD/FP register.  T selects the access size;
  // Q adds 0b10 to the opcode for the 128-bit form.
#define INSN(NAME, op)                                                       \
  void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) {       \
    ld_st2(as_Register(Rt), adr, (int)T & 3, op + ((T==Q) ? 0b10:0b00), 1);  \
  }

  INSN(ldr, 1);
  INSN(str, 0);

#undef INSN

private:

  // Multi-structure load/store, base register only (no writeback).
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Multi-structure load/store, post-indexed by an immediate.  The
  // immediate must equal the total transfer size, which is validated
  // against the arrangement and register count.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             int imm, int op1, int op2, int regs) {

    bool replicate = op2 >> 2 == 3;
    // post-index value (imm) is formed differently for replicate/non-replicate ld* instructions
    int expectedImmediate = replicate ? regs * (1 << (T >> 1)) : SIMD_Size_in_bytes[T] * regs;
    guarantee(T < T1Q , "incorrect arrangement");
    guarantee(imm == expectedImmediate, "bad offset");
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Multi-structure load/store, post-indexed by a register.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             Register Xm, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }

  // Dispatch on the addressing mode to one of the three emitters above.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2, int regs) {
    switch (a.getMode()) {
    case Address::base_plus_offset:
      guarantee(a.offset() == 0, "no offset allowed here");
      ld_st(Vt, T, a.base(), op1, op2);
      break;
    case Address::post:
      ld_st(Vt, T, a.base(), checked_cast<int>(a.offset()), op1, op2, regs);
      break;
    case Address::post_reg:
      ld_st(Vt, T, a.base(), a.index(), op1, op2);
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // Single-structure load/store method (all addressing variants)
  void ld_st(FloatRegister Vt, SIMD_RegVariant T, int index, Address a,
             int op1, int op2, int regs) {
    int expectedImmediate = (regVariant_to_elemBits(T) >> 3) * regs;
    // sVal/opcode/size spread the element index and size across the
    // S, opcode and size fields of the single-structure encoding.
    int sVal = (T < D) ? (index >> (2 - T)) & 0x01 : 0;
    int opcode = (T < D) ? (T << 2) : ((T & 0x02) << 2);
    int size = (T < D) ? (index & (0x3 << T)) : 1; // only care about low 2b
    Register Xn = a.base();
    int Rm;

    switch (a.getMode()) {
    case Address::base_plus_offset:
      guarantee(a.offset() == 0, "no offset allowed here");
      Rm = 0;
      break;
    case Address::post:
      guarantee(a.offset() == expectedImmediate, "bad offset");
      op1 |= 0b100;          // set the writeback bit
      Rm = 0b11111;          // Rm = 0b11111 selects immediate post-index
      break;
    case Address::post_reg:
      op1 |= 0b100;
      Rm = a.index()->encoding();
      break;
    default:
      ShouldNotReachHere();
      Rm = 0; // unreachable
    }

    starti;
    f(0,31), f((index >> (3 - T)), 30);
    f(op1, 29, 21), f(Rm, 20, 16), f(op2 | opcode | sVal, 15, 12);
    f(size, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }

public:

  // INSNk defines a multi-structure load/store taking k consecutive
  // SIMD registers; the asserts enforce the consecutive ordering.
#define INSN1(NAME, op1, op2)                                          \
  void NAME(FloatRegister Vt, SIMD_Arrangement T, const Address &a) {  \
    ld_st(Vt, T, a, op1, op2, 1);                                      \
  }

#define INSN2(NAME, op1, op2)                                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2, "Registers must be ordered");                        \
    ld_st(Vt, T, a, op1, op2, 2);                                                       \
  }

#define INSN3(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            SIMD_Arrangement T, const Address &a) {                     \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3,           \
           "Registers must be ordered");                                \
    ld_st(Vt, T, a, op1, op2, 3);                                       \
  }

#define INSN4(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            FloatRegister Vt4, SIMD_Arrangement T, const Address &a) {  \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3 &&         \
           Vt3->successor() == Vt4, "Registers must be ordered");       \
    ld_st(Vt, T, a, op1, op2, 4);                                       \
  }

  INSN1(ld1, 0b001100010, 0b0111);
  INSN2(ld1, 0b001100010, 0b1010);
  INSN3(ld1, 0b001100010, 0b0110);
  INSN4(ld1, 0b001100010, 0b0010);

  INSN2(ld2, 0b001100010, 0b1000);
  INSN3(ld3, 0b001100010, 0b0100);
  INSN4(ld4, 0b001100010, 0b0000);

  INSN1(st1, 0b001100000, 0b0111);
  INSN2(st1, 0b001100000, 0b1010);
  INSN3(st1, 0b001100000, 0b0110);
  INSN4(st1, 0b001100000, 0b0010);

  INSN2(st2, 0b001100000, 0b1000);
  INSN3(st3, 0b001100000, 0b0100);
  INSN4(st4, 0b001100000, 0b0000);

  // Load single structure and replicate to all lanes.
  INSN1(ld1r, 0b001101010, 0b1100);
  INSN2(ld2r, 0b001101011, 0b1100);
  INSN3(ld3r, 0b001101010, 0b1110);
  INSN4(ld4r, 0b001101011, 0b1110);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4

  // Handle common single-structure ld/st parameter sanity checks
  // for all variations (1 to 4) of SIMD register inputs. This
  // method will call the routine that generates the opcode.
  template<typename R, typename... Rx>
  void ldst_sstr(SIMD_RegVariant T, int index, const Address &a,
                 int op1, int op2, R firstReg, Rx... otherRegs) {
    const FloatRegister vtSet[] = { firstReg, otherRegs... };
    const int regCount = sizeof...(otherRegs) + 1;
    // The lane index must fit the element size implied by T
    // (B: 0..15, H: 0..7, S: 0..3, D: 0..1).
    assert(index >= 0 && (T <= D) && ((T == B && index <= 15) ||
           (T == H && index <= 7) || (T == S && index <= 3) ||
           (T == D && index <= 1)), "invalid index");
    assert(regCount >= 1 && regCount <= 4, "illegal register count");

    // Check to make sure when multiple SIMD registers are used
    // that they are in successive order.
    for (int i = 0; i < regCount - 1; i++) {
      assert(vtSet[i]->successor() == vtSet[i + 1],
             "Registers must be ordered");
    }

    ld_st(firstReg, T, index, a, op1, op2, regCount);
  }

  // Define a set of INSN1/2/3/4 macros to handle single-structure
  // load/store instructions.
  // INSNk: single-structure (one lane) load/store for k consecutive
  // SIMD registers; all checks are delegated to ldst_sstr above.
#define INSN1(NAME, op1, op2)                                   \
  void NAME(FloatRegister Vt, SIMD_RegVariant T, int index,     \
            const Address &a) {                                 \
    ldst_sstr(T, index, a, op1, op2, Vt);                       \
  }

#define INSN2(NAME, op1, op2)                                       \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_RegVariant T, \
            int index, const Address &a) {                          \
    ldst_sstr(T, index, a, op1, op2, Vt, Vt2);                      \
  }

#define INSN3(NAME, op1, op2)                                       \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3, \
            SIMD_RegVariant T, int index, const Address &a) {       \
    ldst_sstr(T, index, a, op1, op2, Vt, Vt2, Vt3);                 \
  }

#define INSN4(NAME, op1, op2)                                       \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3, \
            FloatRegister Vt4, SIMD_RegVariant T, int index,        \
            const Address &a) {                                     \
    ldst_sstr(T, index, a, op1, op2, Vt, Vt2, Vt3, Vt4);            \
  }

  INSN1(st1, 0b001101000, 0b0000);
  INSN2(st2, 0b001101001, 0b0000);
  INSN3(st3, 0b001101000, 0b0010);
  INSN4(st4, 0b001101001, 0b0010);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4

  // Three-same bitwise logical operations; byte arrangements only.
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T8B || T == T16B, "must be T8B or T16B");                               \
    f(0, 31), f((int)T & 1, 30), f(opc, 29, 21);                                        \
    rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0);                              \
  }

  INSN(eor, 0b101110001);
  INSN(orr, 0b001110101);
  INSN(andr, 0b001110001);
  INSN(bic, 0b001110011);
  INSN(bif, 0b101110111);
  INSN(bit, 0b101110101);
  INSN(bsl, 0b101110011);
  INSN(orn, 0b001110111);

#undef INSN

  // Advanced SIMD three different
  // acceptT2D widens the arrangement check for ops that also support T2D.
#define INSN(NAME, opc, opc2, acceptT2D)                                                \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");                           \
    if (!acceptT2D) guarantee(T != T2D, "incorrect arrangement");                       \
    starti;                                                                             \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);                        \
    f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(opc2, 15, 10);                      \
    rf(Vn, 5), rf(Vd, 0);                                                               \
  }

  INSN(addv,   0, 0b100001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(subv,   1, 0b100001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(uqsubv, 1, 0b001011, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(mulv,   0, 0b100111, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlav,   0, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlsv,   1, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sshl,   0, 0b010001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(ushl,   1, 0b010001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(addpv,  0, 0b101111, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(smullv, 0, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umullv, 1, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umlalv, 1, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(maxv,   0, 0b011001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(minv,   0, 0b011011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(smaxp,  0, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sminp,  0, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S

#undef INSN

  // Advanced SIMD across lanes
  // 'accepted' is a tiered arrangement filter: 0 = bytes only,
  // 1 = additionally halfwords and T4S, 2 = additionally T2S, 3 = all.
#define INSN(NAME, opc, opc2, accepted)                                          \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {            \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");                    \
    if (accepted < 3) guarantee(T != T2D, "incorrect arrangement");              \
    if (accepted < 2) guarantee(T != T2S, "incorrect arrangement");              \
    if (accepted < 1) guarantee(T == T8B || T == T16B, "incorrect arrangement"); \
    starti;                                                                      \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);                 \
    f((int)T >> 1, 23, 22), f(opc2, 21, 10);                                     \
    rf(Vn, 5), rf(Vd, 0);                                                        \
  }

  INSN(absr,   0, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(negr,   1, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(notr,   1, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(addv,   0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(smaxv,  0, 0b110000101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(umaxv,  1, 0b110000101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(sminv,  0, 0b110001101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(uminv,  1, 0b110001101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(cls,    0, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(clz,    1, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(cnt,    0, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(uaddlp, 1, 0b100000001010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S

#undef INSN

  // Floating-point max/min across lanes.
#define INSN(NAME, opc)                                                               \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {                 \
    starti;                                                                           \
    assert(T == T4S, "arrangement must be T4S");                                      \
    f(0, 31), f((int)T & 1, 30), f(0b101110, 29, 24), f(opc, 23),                     \
    f(T == T4S ? 0 : 1, 22), f(0b110000111110, 21, 10); rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(fmaxv, 0);
  INSN(fminv, 1);

#undef INSN

  // Advanced SIMD modified immediate
  // imm8 is the 8-bit immediate; lsl is an optional left shift whose
  // legal values depend on the arrangement (checked by the assert).
#define INSN(NAME, op0, cmode0)                                                            \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) {       \
    unsigned cmode = cmode0;                                                               \
    unsigned op = op0;                                                                     \
    starti;                                                                                \
    assert(lsl == 0 ||                                                                     \
           ((T == T4H || T == T8H) && lsl == 8) ||                                         \
           ((T == T2S || T == T4S) && ((lsl >> 3) < 4) && ((lsl & 7) == 0)), "invalid shift");\
    cmode |= lsl >> 2;                                                                     \
    if (T == T4H || T == T8H) cmode |= 0b1000;                                             \
    if (!(T == T4H || T == T8H || T == T2S || T == T4S)) {                                 \
      assert(op == 0 && cmode0 == 0, "must be MOVI");                                      \
      cmode = 0b1110;                                                                      \
      if (T == T1D || T == T2D) op = 1;                                                    \
    }                                                                                      \
    f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19);                       \
    f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5);      \
    rf(Vd, 0);                                                                             \
  }

  INSN(movi, 0, 0);
  INSN(orri, 0, 1);
  INSN(mvni, 1, 0);
  INSN(bici, 1, 1);

#undef INSN

  // Vector fmov (immediate): the double value is packed into imm8.
#define INSN(NAME, op, cmode)                                                         \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, double imm) {                       \
    unsigned imm8 = pack(imm);                                                        \
    starti;                                                                           \
    f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19);                  \
    f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5); \
    rf(Vd, 0);                                                                        \
  }

  INSN(fmovs, 0, 0b1111);
  INSN(fmovd, 1, 0b1111);

#undef INSN

  // Advanced SIMD three same
#define INSN(NAME, op1, op2, op3)                                                        \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) {  \
    starti;                                                                              \
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");                     \
    f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01110, 28, 24), f(op2, 23);             \
    f(T==T2D ? 1:0, 22); f(1, 21), rf(Vm, 16), f(op3, 15, 10), rf(Vn, 5), rf(Vd, 0);     \
  }

  INSN(fabd,  1, 1, 0b110101);
  INSN(fadd,  0, 0, 0b110101);
  INSN(fdiv,  1, 0, 0b111111);
  INSN(faddp, 1, 0, 0b110101);
  INSN(fmul,  1, 0, 0b110111);
  INSN(fsub,  0, 1, 0b110101);
  INSN(fmla,  0, 0, 0b110011);
  INSN(fmls,  0, 1, 0b110011);
  INSN(fmax,  0, 0, 0b111101);
  INSN(fmin,  0, 1, 0b111101);
  INSN(facgt, 1, 1, 0b111011);

#undef INSN

  // AdvSIMD vector compare
  // Maps the JDK Condition to the integer vector-compare opcode; only
  // EQ/GT/GE/HI/HS are encodable.
  void cm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) {
    starti;
    assert(T != T1Q && T != T1D, "incorrect arrangement");
    int cond_op;
    switch (cond) {
      case EQ: cond_op = 0b110001; break;
      case GT: cond_op = 0b000110; break;
      case GE: cond_op = 0b000111; break;
      case HI: cond_op = 0b100110; break;
      case HS: cond_op = 0b100111; break;
      default:
        ShouldNotReachHere();
        break;
    }

    f(0, 31), f((int)T & 1, 30), f((cond_op >> 5) & 1, 29);
    f(0b01110, 28, 24), f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16);
    f(cond_op & 0b11111, 15, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // AdvSIMD Floating-point vector compare
  void fcm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) {
    starti;
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");
    int cond_op;
    switch (cond) {
      case EQ: cond_op = 0b00; break;
      case GT: cond_op = 0b11; break;
      case GE: cond_op = 0b10; break;
      default:
        ShouldNotReachHere();
        break;
    }

    f(0, 31), f((int)T & 1, 30), f((cond_op >> 1) & 1, 29);
    f(0b01110, 28, 24), f(cond_op & 1, 23), f(T == T2D ? 1 : 0, 22);
    f(1, 21), rf(Vm, 16), f(0b111001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // SHA-1 / SHA-256 three-register crypto ops.
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T4S, "arrangement must be T4S");                                        \
    f(0b01011110000, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(sha1c,     0b000000);
  INSN(sha1m,     0b001000);
  INSN(sha1p,     0b000100);
  INSN(sha1su0,   0b001100);
  INSN(sha256h2,  0b010100);
  INSN(sha256h,   0b010000);
  INSN(sha256su1, 0b011000);

#undef INSN

  // SHA-1 / SHA-256 two-register crypto ops.
#define INSN(NAME, opc)                                                       \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {         \
    starti;                                                                   \
    assert(T == T4S, "arrangement must be T4S");                              \
    f(0b0101111000101000, 31, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0);      \
  }

  INSN(sha1h,     0b000010);
  INSN(sha1su1,   0b000110);
  INSN(sha256su0, 0b001010);

#undef INSN

  // SHA-512 three-register crypto ops.
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T2D, "arrangement must be T2D");                                        \
    f(0b11001110011, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(sha512h,   0b100000);
  INSN(sha512h2,  0b100001);
  INSN(sha512su1, 0b100010);

#undef INSN

  // SHA-512 two-register schedule update.
#define INSN(NAME, opc)                                               \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    starti;                                                           \
    assert(T == T2D, "arrangement must be T2D");                      \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0);                             \
  }

  INSN(sha512su0, 0b1100111011000000100000);

#undef INSN

  // Four-register crypto ops: EOR3/BCAX (SHA-3 extension).
#define INSN(NAME, opc)                                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, FloatRegister Va) { \
    starti;                                                                                             \
    assert(T == T16B, "arrangement must be T16B");                                                      \
    f(0b11001110, 31, 24), f(opc, 23, 21), rf(Vm, 16), f(0b0, 15, 15), rf(Va, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(eor3, 0b000);
  INSN(bcax, 0b001);

#undef INSN

  // XAR: exclusive-or and rotate by immediate (SHA-3 extension).
#define INSN(NAME, opc)                                                                                  \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, unsigned imm) {    \
    starti;                                                                                              \
    assert(T == T2D, "arrangement must be T2D");                                                         \
    f(0b11001110, 31, 24), f(opc, 23, 21), rf(Vm, 16), f(imm, 15, 10), rf(Vn, 5), rf(Vd, 0);             \
  }

  INSN(xar, 0b100);

#undef INSN

  // RAX1: rotate-and-xor (SHA-3 extension).
#define INSN(NAME, opc)                                                                         \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) {         \
    starti;                                                                                     \
    assert(T == T2D, "arrangement must be T2D");                                                \
    f(0b11001110, 31, 24), f(opc, 23, 21), rf(Vm, 16), f(0b100011, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(rax1, 0b011);

#undef INSN

  // AES single-round and mix-columns ops (fixed full encodings).
#define INSN(NAME, opc)                           \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    starti;                                       \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(aese,   0b0100111000101000010010);
  INSN(aesd,   0b0100111000101000010110);
  INSN(aesmc,  0b0100111000101000011010);
  INSN(aesimc, 0b0100111000101000011110);

#undef INSN

  // FP multiply(-accumulate) by a selected element of Vm.
#define INSN(NAME, op1, op2)                                                                            \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index = 0) {  \
    starti;                                                                                             \
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");                                    \
    assert(index >= 0 && ((T == T2D && index <= 1) || (T != T2D && index <= 3)), "invalid index");      \
    f(0, 31), f((int)T & 1, 30), f(op1, 29); f(0b011111, 28, 23);                                       \
    f(T == T2D ? 1 : 0, 22), f(T == T2D ? 0 : index & 1, 21), rf(Vm, 16);                               \
    f(op2, 15, 12), f(T == T2D ? index : (index >> 1), 11), f(0, 10);                                   \
    rf(Vn, 5), rf(Vd, 0);                                                                               \
  }

  // FMLA/FMLS - Vector - Scalar
  INSN(fmlavs, 0, 0b0001);
  INSN(fmlsvs, 0, 0b0101);
  // FMULX - Vector - Scalar
  INSN(fmulxvs, 1, 0b1001);

#undef INSN

  // Floating-point Reciprocal Estimate
  void frecpe(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) {
    assert(type == D || type == S, "Wrong type for frecpe");
    starti;
    f(0b010111101, 31, 23);
    f(type == D ? 1 : 0, 22);
    f(0b100001110110, 21, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

  // (long) {a, b} -> (a + b)
  void addpd(FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(0b0101111011110001101110, 31, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

  // Floating-point AdvSIMD scalar pairwise
#define INSN(NAME, op1, op2)                                                                  \
  void NAME(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) {                       \
    starti;                                                                                   \
    assert(type == D || type == S, "Wrong type for faddp/fmaxp/fminp");                       \
    f(0b0111111, 31, 25), f(op1, 24, 23),                                                     \
    f(type == S ? 0 : 1, 22), f(0b11000, 21, 17), f(op2, 16, 10), rf(Vn, 5), rf(Vd, 0);       \
  }

  INSN(faddp, 0b00, 0b0110110);
  INSN(fmaxp, 0b00, 0b0111110);
  INSN(fminp, 0b01, 0b0111110);

#undef INSN

  // ins: Vd.T[didx] := Vn.T[sidx] (element copy between vectors).
  void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) {
    starti;
    assert(T != Q, "invalid register variant");
    f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15);
    f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // umov/smov: move a vector element to a GP register
  // (zero- vs sign-extending).
#define INSN(NAME, cond, op1, op2)                                        \
  void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) {  \
    starti;                                                               \
    assert(cond, "invalid register variant");                             \
    f(0, 31), f(op1, 30), f(0b001110000, 29, 21);                         \
    f(((idx << 1) | 1) << (int)T, 20, 16), f(op2, 15, 10);                \
    rf(Vn, 5), rf(Rd, 0);                                                 \
  }

  INSN(umov, (T != Q), (T == D ? 1 : 0), 0b001111);
  INSN(smov, (T < D), 1, 0b001011);

#undef INSN

  // Vector shift by immediate.
#define INSN(NAME, opc, opc2, isSHR)                                        \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \
    starti;                                                                 \
    /* The encodings for the immh:immb fields (bits 22:16) in *SHR are      \
     *   0001 xxx       8B/16B, shift = 16  - UInt(immh:immb)               \
     *   001x xxx       4H/8H,  shift = 32  - UInt(immh:immb)               \
     *   01xx xxx       2S/4S,  shift = 64  - UInt(immh:immb)               \
     *   1xxx xxx       1D/2D,  shift = 128 - UInt(immh:immb)               \
     *   (1D is RESERVED)                                                   \
     * for SHL shift is calculated as:                                      \
     *   0001 xxx       8B/16B, shift = UInt(immh:immb) - 8                 \
     *   001x xxx       4H/8H,  shift = UInt(immh:immb) - 16                \
     *   01xx xxx       2S/4S,  shift = UInt(immh:immb) - 32                \
     *   1xxx xxx       1D/2D,  shift = UInt(immh:immb) - 64                \
     *   (1D is RESERVED)                                                   \
     */                                                                     \
    guarantee(!isSHR || (isSHR && (shift != 0)), "impossible encoding");    \
    assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value");               \
    int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0)));                   \
    int encodedShift = isSHR ? cVal - shift : cVal + shift;                 \
    f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23),                \
    f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(shl,  0, 0b010101, /* isSHR = */ false);
  INSN(sshr, 0, 0b000001, /* isSHR = */ true);
  INSN(ushr, 1, 0b000001, /* isSHR = */ true);
  INSN(usra, 1, 0b000101, /* isSHR = */ true);
  INSN(ssra, 0, 0b000101, /* isSHR = */ true);
  INSN(sli,  1, 0b010101, /* isSHR = */ false);

#undef INSN

  // Scalar (D-sized) shift by immediate; immh:immb is 128 - shift for
  // right shifts and 64 + shift for left shifts (see table above).
#define INSN(NAME, opc, opc2, isSHR)                                \
  void NAME(FloatRegister Vd, FloatRegister Vn, int shift){         \
    starti;                                                         \
    int encodedShift = isSHR ? 128 - shift : 64 + shift;            \
    f(0b01, 31, 30), f(opc, 29), f(0b111110, 28, 23),               \
    f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(shld,  0, 0b010101, /* isSHR = */ false);
  INSN(sshrd, 0, 0b000001, /* isSHR = */ true);
  INSN(ushrd, 1, 0b000001, /* isSHR = */ true);

#undef INSN

protected:
  // Shared emitter for ushll/sshll (widening shift left long).
  void _xshll(bool is_unsigned, FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    starti;
    /* The encodings for the immh:immb fields (bits 22:16) are
     *   0001 xxx       8H, 8B/16B shift = xxx
     *   001x xxx       4S, 4H/8H  shift = xxxx
     *   01xx xxx       2D, 2S/4S  shift = xxxxx
     *   1xxx xxx       RESERVED
     */
    assert((Tb >> 1) + 1 == (Ta >> 1), "Incompatible arrangement");
    assert((1 << ((Tb>>1)+3)) > shift, "Invalid shift value");
    f(0, 31), f(Tb & 1, 30), f(is_unsigned ? 1 : 0, 29), f(0b011110, 28, 23);
    f((1 << ((Tb>>1)+3))|shift, 22, 16);
    f(0b101001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

public:
  void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement");
    _xshll(/* is_unsigned */ true, Vd, Ta, Vn, Tb, shift);
  }

  void ushll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement");
    _xshll(/* is_unsigned */ true, Vd, Ta, Vn, Tb, shift);
  }

  // uxtl is ushll with a zero shift (pure widening).
  void uxtl(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    ushll(Vd, Ta, Vn, Tb, 0);
  }

  void sshll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement");
    _xshll(/* is_unsigned */ false, Vd, Ta, Vn, Tb, shift);
  }

  void sshll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement");
    _xshll(/* is_unsigned */ false, Vd, Ta, Vn, Tb, shift);
  }

  // sxtl is sshll with a zero shift (pure sign-extending widening).
  void sxtl(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    sshll(Vd, Ta, Vn, Tb, 0);
  }

  // Move from general purpose register
  //   mov Vd.T[index], Rn
  void mov(FloatRegister Vd, SIMD_RegVariant T, int index, Register Xn) {
    guarantee(T != Q, "invalid register variant");
    starti;
    f(0b01001110000, 31, 21), f(((1 << T) | (index << (T + 1))), 20, 16);
    f(0b000111, 15, 10), zrf(Xn, 5), rf(Vd, 0);
  }

  // Move to general purpose register
  //   mov Rd, Vn.T[index]
  void mov(Register Xd, FloatRegister Vn, SIMD_RegVariant T, int index) {
    guarantee(T == S || T == D, "invalid register variant");
    umov(Xd, Vn, T, index);
  }

private:
  // Shared emitter for pmull/pmull2 (polynomial multiply long).
  void _pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    starti;
    assert((Ta == T1Q && (Tb == T1D || Tb == T2D)) ||
           (Ta == T8H && (Tb == T8B || Tb == T16B)), "Invalid Size specifier");
    int size = (Ta == T1Q) ? 0b11 : 0b00;
    f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size, 23, 22);
    f(1, 21), rf(Vm, 16), f(0b111000, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

public:
  void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    assert(Tb == T1D || Tb == T8B, "pmull assumes T1D or T8B as the second size specifier");
    _pmull(Vd, Ta, Vn, Vm, Tb);
  }

  void pmull2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    assert(Tb == T2D || Tb == T16B, "pmull2 assumes T2D or T16B as the second size specifier");
    _pmull(Vd, Ta, Vn, Vm, Tb);
  }

  // Unsigned saturating extract narrow: Ta elements narrowed into Tb.
  void uqxtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) {
    starti;
    int size_b = (int)Tb >> 1;
    int size_a = (int)Ta >> 1;
    assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier");
    f(0, 31), f(Tb & 1, 30), f(0b101110, 29, 24), f(size_b, 23, 22);
    f(0b100001010010, 21, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Extract narrow (truncating): Ta elements narrowed into Tb.
  void xtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) {
    starti;
    int size_b = (int)Tb >> 1;
    int size_a = (int)Ta >> 1;
    assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier");
    f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size_b, 23, 22);
    f(0b100001001010, 21, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Duplicate a general-purpose register into all lanes of Vd.
  void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs)
  {
    starti;
    assert(T != T1D, "reserved encoding");
    f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21);
    f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), zrf(Xs, 5), rf(Vd, 0);
  }

  // Duplicate element Vn.T[index] into all lanes of Vd.
  void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0)
  {
    starti;
    assert(T != T1D, "reserved encoding");
    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21);
    f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
    f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Advanced SIMD scalar copy
  void dup(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int index = 0)
  {
    starti;
    assert(T != Q, "invalid size");
    f(0b01011110000, 31, 21);
    f((1 << T) | (index << (T + 1)), 20, 16);
    f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // AdvSIMD ZIP/UZP/TRN
#define INSN(NAME, opcode)                                                              \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1D && T != T1Q, "invalid arrangement");                             \
    starti;                                                                             \
    f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15);                                  \
    f(opcode, 14, 12), f(0b10, 11, 10);                                                 \
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);                                                   \
    f(T & 1, 30), f(T >> 1, 23, 22);                                                    \
  }

  INSN(uzp1, 0b001);
  INSN(trn1, 0b010);
  INSN(zip1, 0b011);
  INSN(uzp2, 0b101);
  INSN(trn2, 0b110);
  INSN(zip2, 0b111);

#undef INSN

  // CRC32 instructions
  // c selects CRC32 vs CRC32C; sf/sz select the source operand width.
#define INSN(NAME, c, sf, sz)                                           \
  void NAME(Register Rd, Register Rn, Register Rm) {                    \
    starti;                                                             \
    f(sf, 31), f(0b0011010110, 30, 21), f(0b010, 15, 13), f(c, 12);     \
    f(sz, 11, 10), rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);                    \
  }

  INSN(crc32b,  0, 0, 0b00);
  INSN(crc32h,  0, 0, 0b01);
  INSN(crc32w,  0, 0, 0b10);
  INSN(crc32x,  0, 1, 0b11);
  INSN(crc32cb, 1, 0, 0b00);
  INSN(crc32ch, 1, 0, 0b01);
  INSN(crc32cw, 1, 0, 0b10);
  INSN(crc32cx, 1, 1, 0b11);

#undef INSN

  // Table vector lookup
#define INSN(NAME, op)                                                                              \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
    starti;                                                                                         \
    assert(T == T8B || T == T16B, "invalid arrangement");                                           \
    assert(0 < registers && registers <= 4, "invalid number of registers");                         \
    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15);                      \
    f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn,
5), rf(Vd, 0); \ 3139 } 3140 3141 INSN(tbl, 0); 3142 INSN(tbx, 1); 3143 3144 #undef INSN 3145 3146 // AdvSIMD two-reg misc 3147 // In this instruction group, the 2 bits in the size field ([23:22]) may be 3148 // fixed or determined by the "SIMD_Arrangement T", or both. The additional 3149 // parameter "tmask" is a 2-bit mask used to indicate which bits in the size 3150 // field are determined by the SIMD_Arrangement. The bit of "tmask" should be 3151 // set to 1 if corresponding bit marked as "x" in the ArmARM. 3152 #define INSN(NAME, U, size, tmask, opcode) \ 3153 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 3154 starti; \ 3155 assert((ASSERTION), MSG); \ 3156 f(0, 31), f((int)T & 1, 30), f(U, 29), f(0b01110, 28, 24); \ 3157 f(size | ((int)(T >> 1) & tmask), 23, 22), f(0b10000, 21, 17); \ 3158 f(opcode, 16, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0); \ 3159 } 3160 3161 #define MSG "invalid arrangement" 3162 3163 #define ASSERTION (T == T2S || T == T4S || T == T2D) 3164 INSN(fsqrt, 1, 0b10, 0b01, 0b11111); 3165 INSN(fabs, 0, 0b10, 0b01, 0b01111); 3166 INSN(fneg, 1, 0b10, 0b01, 0b01111); 3167 INSN(frintn, 0, 0b00, 0b01, 0b11000); 3168 INSN(frintm, 0, 0b00, 0b01, 0b11001); 3169 INSN(frintp, 0, 0b10, 0b01, 0b11000); 3170 INSN(fcvtas, 0, 0b00, 0b01, 0b11100); 3171 INSN(fcvtzs, 0, 0b10, 0b01, 0b11011); 3172 INSN(fcvtms, 0, 0b00, 0b01, 0b11011); 3173 #undef ASSERTION 3174 3175 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S) 3176 INSN(rev64, 0, 0b00, 0b11, 0b00000); 3177 #undef ASSERTION 3178 3179 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H) 3180 INSN(rev32, 1, 0b00, 0b11, 0b00000); 3181 #undef ASSERTION 3182 3183 #define ASSERTION (T == T8B || T == T16B) 3184 INSN(rev16, 0, 0b00, 0b11, 0b00001); 3185 INSN(rbit, 1, 0b01, 0b00, 0b00101); 3186 #undef ASSERTION 3187 3188 #undef MSG 3189 3190 #undef INSN 3191 3192 // AdvSIMD compare with zero (vector) 3193 void cm(Condition cond, 
FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { 3194 starti; 3195 assert(T != T1Q && T != T1D, "invalid arrangement"); 3196 int cond_op; 3197 switch (cond) { 3198 case EQ: cond_op = 0b001; break; 3199 case GE: cond_op = 0b100; break; 3200 case GT: cond_op = 0b000; break; 3201 case LE: cond_op = 0b101; break; 3202 case LT: cond_op = 0b010; break; 3203 default: 3204 ShouldNotReachHere(); 3205 break; 3206 } 3207 3208 f(0, 31), f((int)T & 1, 30), f((cond_op >> 2) & 1, 29); 3209 f(0b01110, 28, 24), f((int)T >> 1, 23, 22), f(0b10000010, 21, 14); 3210 f(cond_op & 0b11, 13, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0); 3211 } 3212 3213 // AdvSIMD Floating-point compare with zero (vector) 3214 void fcm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { 3215 starti; 3216 assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); 3217 int cond_op; 3218 switch (cond) { 3219 case EQ: cond_op = 0b010; break; 3220 case GT: cond_op = 0b000; break; 3221 case GE: cond_op = 0b001; break; 3222 case LE: cond_op = 0b011; break; 3223 case LT: cond_op = 0b100; break; 3224 default: 3225 ShouldNotReachHere(); 3226 break; 3227 } 3228 3229 f(0, 31), f((int)T & 1, 30), f(cond_op & 1, 29), f(0b011101, 28, 23); 3230 f(((int)(T >> 1) & 1), 22), f(0b10000011, 21, 14); 3231 f((cond_op >> 1) & 0b11, 13, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0); 3232 } 3233 3234 void ext(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index) 3235 { 3236 starti; 3237 assert(T == T8B || T == T16B, "invalid arrangement"); 3238 assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value"); 3239 f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21); 3240 rf(Vm, 16), f(0, 15), f(index, 14, 11); 3241 f(0, 10), rf(Vn, 5), rf(Vd, 0); 3242 } 3243 3244 // SVE arithmetic - unpredicated 3245 #define INSN(NAME, opcode) \ 3246 void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \ 3247 starti; \ 3248 
assert(T != Q, "invalid register variant"); \ 3249 f(0b00000100, 31, 24), f(T, 23, 22), f(1, 21), \ 3250 rf(Zm, 16), f(0, 15, 13), f(opcode, 12, 10), rf(Zn, 5), rf(Zd, 0); \ 3251 } 3252 INSN(sve_add, 0b000); 3253 INSN(sve_sub, 0b001); 3254 #undef INSN 3255 3256 // SVE integer add/subtract immediate (unpredicated) 3257 #define INSN(NAME, op) \ 3258 void NAME(FloatRegister Zd, SIMD_RegVariant T, unsigned imm8) { \ 3259 starti; \ 3260 /* The immediate is an unsigned value in the range 0 to 255, and \ 3261 * for element width of 16 bits or higher it may also be a \ 3262 * positive multiple of 256 in the range 256 to 65280. \ 3263 */ \ 3264 assert(T != Q, "invalid size"); \ 3265 int sh = 0; \ 3266 if (imm8 <= 0xff) { \ 3267 sh = 0; \ 3268 } else if (T != B && imm8 <= 0xff00 && (imm8 & 0xff) == 0) { \ 3269 sh = 1; \ 3270 imm8 = (imm8 >> 8); \ 3271 } else { \ 3272 guarantee(false, "invalid immediate"); \ 3273 } \ 3274 f(0b00100101, 31, 24), f(T, 23, 22), f(0b10000, 21, 17); \ 3275 f(op, 16, 14), f(sh, 13), f(imm8, 12, 5), rf(Zd, 0); \ 3276 } 3277 3278 INSN(sve_add, 0b011); 3279 INSN(sve_sub, 0b111); 3280 #undef INSN 3281 3282 // SVE floating-point arithmetic - unpredicated 3283 #define INSN(NAME, opcode) \ 3284 void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \ 3285 starti; \ 3286 assert(T == S || T == D, "invalid register variant"); \ 3287 f(0b01100101, 31, 24), f(T, 23, 22), f(0, 21), \ 3288 rf(Zm, 16), f(0, 15, 13), f(opcode, 12, 10), rf(Zn, 5), rf(Zd, 0); \ 3289 } 3290 3291 INSN(sve_fadd, 0b000); 3292 INSN(sve_fmul, 0b010); 3293 INSN(sve_fsub, 0b001); 3294 #undef INSN 3295 3296 private: 3297 void sve_predicate_reg_insn(unsigned op24, unsigned op13, 3298 FloatRegister Zd_or_Vd, SIMD_RegVariant T, 3299 PRegister Pg, FloatRegister Zn_or_Vn) { 3300 starti; 3301 f(op24, 31, 24), f(T, 23, 22), f(op13, 21, 13); 3302 pgrf(Pg, 10), rf(Zn_or_Vn, 5), rf(Zd_or_Vd, 0); 3303 } 3304 3305 void sve_shift_imm_encoding(SIMD_RegVariant T, int shift, 
bool isSHR, 3306 int& tszh, int& tszl_imm) { 3307 /* The encodings for the tszh:tszl:imm3 fields 3308 * for shift right is calculated as: 3309 * 0001 xxx B, shift = 16 - UInt(tszh:tszl:imm3) 3310 * 001x xxx H, shift = 32 - UInt(tszh:tszl:imm3) 3311 * 01xx xxx S, shift = 64 - UInt(tszh:tszl:imm3) 3312 * 1xxx xxx D, shift = 128 - UInt(tszh:tszl:imm3) 3313 * for shift left is calculated as: 3314 * 0001 xxx B, shift = UInt(tszh:tszl:imm3) - 8 3315 * 001x xxx H, shift = UInt(tszh:tszl:imm3) - 16 3316 * 01xx xxx S, shift = UInt(tszh:tszl:imm3) - 32 3317 * 1xxx xxx D, shift = UInt(tszh:tszl:imm3) - 64 3318 */ 3319 assert(T != Q, "Invalid register variant"); 3320 if (isSHR) { 3321 assert(((1 << (T + 3)) >= shift) && (shift > 0) , "Invalid shift value"); 3322 } else { 3323 assert(((1 << (T + 3)) > shift) && (shift >= 0) , "Invalid shift value"); 3324 } 3325 int cVal = (1 << ((T + 3) + (isSHR ? 1 : 0))); 3326 int encodedShift = isSHR ? cVal - shift : cVal + shift; 3327 tszh = encodedShift >> 5; 3328 tszl_imm = encodedShift & 0x1f; 3329 } 3330 3331 public: 3332 3333 // SVE integer arithmetic - predicate 3334 #define INSN(NAME, op1, op2) \ 3335 void NAME(FloatRegister Zdn_or_Zd_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Znm_or_Vn) { \ 3336 assert(T != Q, "invalid register variant"); \ 3337 sve_predicate_reg_insn(op1, op2, Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn); \ 3338 } 3339 3340 INSN(sve_abs, 0b00000100, 0b010110101); // vector abs, unary 3341 INSN(sve_add, 0b00000100, 0b000000000); // vector add 3342 INSN(sve_and, 0b00000100, 0b011010000); // vector and 3343 INSN(sve_andv, 0b00000100, 0b011010001); // bitwise and reduction to scalar 3344 INSN(sve_asr, 0b00000100, 0b010000100); // vector arithmetic shift right 3345 INSN(sve_bic, 0b00000100, 0b011011000); // vector bitwise clear 3346 INSN(sve_clz, 0b00000100, 0b011001101); // vector count leading zero bits 3347 INSN(sve_cnt, 0b00000100, 0b011010101); // count non-zero bits 3348 INSN(sve_cpy, 0b00000101, 0b100000100); // 
copy scalar to each active vector element 3349 INSN(sve_eor, 0b00000100, 0b011001000); // vector eor 3350 INSN(sve_eorv, 0b00000100, 0b011001001); // bitwise xor reduction to scalar 3351 INSN(sve_lsl, 0b00000100, 0b010011100); // vector logical shift left 3352 INSN(sve_lsr, 0b00000100, 0b010001100); // vector logical shift right 3353 INSN(sve_mul, 0b00000100, 0b010000000); // vector mul 3354 INSN(sve_neg, 0b00000100, 0b010111101); // vector neg, unary 3355 INSN(sve_not, 0b00000100, 0b011110101); // bitwise invert vector, unary 3356 INSN(sve_orr, 0b00000100, 0b011000000); // vector or 3357 INSN(sve_orv, 0b00000100, 0b011000001); // bitwise or reduction to scalar 3358 INSN(sve_smax, 0b00000100, 0b001000000); // signed maximum vectors 3359 INSN(sve_smaxv, 0b00000100, 0b001000001); // signed maximum reduction to scalar 3360 INSN(sve_smin, 0b00000100, 0b001010000); // signed minimum vectors 3361 INSN(sve_sminv, 0b00000100, 0b001010001); // signed minimum reduction to scalar 3362 INSN(sve_sub, 0b00000100, 0b000001000); // vector sub 3363 INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar 3364 #undef INSN 3365 3366 // SVE floating-point arithmetic - predicate 3367 #define INSN(NAME, op1, op2) \ 3368 void NAME(FloatRegister Zd_or_Zdn_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn_or_Zm) { \ 3369 assert(T == S || T == D, "invalid register variant"); \ 3370 sve_predicate_reg_insn(op1, op2, Zd_or_Zdn_or_Vd, T, Pg, Zn_or_Zm); \ 3371 } 3372 3373 INSN(sve_fabd, 0b01100101, 0b001000100); // floating-point absolute difference 3374 INSN(sve_fabs, 0b00000100, 0b011100101); 3375 INSN(sve_fadd, 0b01100101, 0b000000100); 3376 INSN(sve_fadda, 0b01100101, 0b011000001); // add strictly-ordered reduction to scalar Vd 3377 INSN(sve_fdiv, 0b01100101, 0b001101100); 3378 INSN(sve_fmax, 0b01100101, 0b000110100); // floating-point maximum 3379 INSN(sve_fmaxv, 0b01100101, 0b000110001); // floating-point maximum recursive reduction to scalar 3380 INSN(sve_fmin, 
0b01100101, 0b000111100); // floating-point minimum 3381 INSN(sve_fminv, 0b01100101, 0b000111001); // floating-point minimum recursive reduction to scalar 3382 INSN(sve_fmul, 0b01100101, 0b000010100); 3383 INSN(sve_fneg, 0b00000100, 0b011101101); 3384 INSN(sve_frintm, 0b01100101, 0b000010101); // floating-point round to integral value, toward minus infinity 3385 INSN(sve_frintn, 0b01100101, 0b000000101); // floating-point round to integral value, nearest with ties to even 3386 INSN(sve_frinta, 0b01100101, 0b000100101); // floating-point round to integral value, nearest with ties to away 3387 INSN(sve_frintp, 0b01100101, 0b000001101); // floating-point round to integral value, toward plus infinity 3388 INSN(sve_fsqrt, 0b01100101, 0b001101101); 3389 INSN(sve_fsub, 0b01100101, 0b000001100); 3390 #undef INSN 3391 3392 // SVE multiple-add/sub - predicated 3393 #define INSN(NAME, op0, op1, op2) \ 3394 void NAME(FloatRegister Zda, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn, FloatRegister Zm) { \ 3395 starti; \ 3396 assert(T != Q, "invalid size"); \ 3397 f(op0, 31, 24), f(T, 23, 22), f(op1, 21), rf(Zm, 16); \ 3398 f(op2, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zda, 0); \ 3399 } 3400 3401 INSN(sve_fmla, 0b01100101, 1, 0b000); // floating-point fused multiply-add, writing addend: Zda = Zda + Zn * Zm 3402 INSN(sve_fmls, 0b01100101, 1, 0b001); // floating-point fused multiply-subtract: Zda = Zda + -Zn * Zm 3403 INSN(sve_fnmla, 0b01100101, 1, 0b010); // floating-point negated fused multiply-add: Zda = -Zda + -Zn * Zm 3404 INSN(sve_fnmls, 0b01100101, 1, 0b011); // floating-point negated fused multiply-subtract: Zda = -Zda + Zn * Zm 3405 INSN(sve_fmad, 0b01100101, 1, 0b100); // floating-point fused multiply-add, writing multiplicand: Zda = Zm + Zda * Zn 3406 INSN(sve_fmsb, 0b01100101, 1, 0b101); // floating-point fused multiply-subtract, writing multiplicand: Zda = Zm + -Zda * Zn 3407 INSN(sve_fnmad, 0b01100101, 1, 0b110); // floating-point negated fused multiply-add, writing 
multiplicand: Zda = -Zm + -Zda * Zn 3408 INSN(sve_fnmsb, 0b01100101, 1, 0b111); // floating-point negated fused multiply-subtract, writing multiplicand: Zda = -Zm + Zda * Zn 3409 INSN(sve_mla, 0b00000100, 0, 0b010); // multiply-add, writing addend: Zda = Zda + Zn*Zm 3410 INSN(sve_mls, 0b00000100, 0, 0b011); // multiply-subtract, writing addend: Zda = Zda + -Zn*Zm 3411 #undef INSN 3412 3413 // SVE bitwise logical - unpredicated 3414 #define INSN(NAME, opc) \ 3415 void NAME(FloatRegister Zd, FloatRegister Zn, FloatRegister Zm) { \ 3416 starti; \ 3417 f(0b00000100, 31, 24), f(opc, 23, 22), f(1, 21), \ 3418 rf(Zm, 16), f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0); \ 3419 } 3420 INSN(sve_and, 0b00); 3421 INSN(sve_eor, 0b10); 3422 INSN(sve_orr, 0b01); 3423 INSN(sve_bic, 0b11); 3424 #undef INSN 3425 3426 // SVE bitwise logical with immediate (unpredicated) 3427 #define INSN(NAME, opc) \ 3428 void NAME(FloatRegister Zd, SIMD_RegVariant T, uint64_t imm) { \ 3429 starti; \ 3430 unsigned elembits = regVariant_to_elemBits(T); \ 3431 uint32_t val = encode_sve_logical_immediate(elembits, imm); \ 3432 f(0b00000101, 31, 24), f(opc, 23, 22), f(0b0000, 21, 18); \ 3433 f(val, 17, 5), rf(Zd, 0); \ 3434 } 3435 INSN(sve_and, 0b10); 3436 INSN(sve_eor, 0b01); 3437 INSN(sve_orr, 0b00); 3438 #undef INSN 3439 3440 // SVE shift immediate - unpredicated 3441 #define INSN(NAME, opc, isSHR) \ 3442 void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, int shift) { \ 3443 starti; \ 3444 int tszh, tszl_imm; \ 3445 sve_shift_imm_encoding(T, shift, isSHR, tszh, tszl_imm); \ 3446 f(0b00000100, 31, 24); \ 3447 f(tszh, 23, 22), f(1,21), f(tszl_imm, 20, 16); \ 3448 f(0b100, 15, 13), f(opc, 12, 10), rf(Zn, 5), rf(Zd, 0); \ 3449 } 3450 3451 INSN(sve_asr, 0b100, /* isSHR = */ true); 3452 INSN(sve_lsl, 0b111, /* isSHR = */ false); 3453 INSN(sve_lsr, 0b101, /* isSHR = */ true); 3454 #undef INSN 3455 3456 // SVE bitwise shift by immediate (predicated) 3457 #define INSN(NAME, opc, isSHR) \ 3458 void 
NAME(FloatRegister Zdn, SIMD_RegVariant T, PRegister Pg, int shift) { \ 3459 starti; \ 3460 int tszh, tszl_imm; \ 3461 sve_shift_imm_encoding(T, shift, isSHR, tszh, tszl_imm); \ 3462 f(0b00000100, 31, 24), f(tszh, 23, 22), f(0b00, 21, 20), f(opc, 19, 16); \ 3463 f(0b100, 15, 13), pgrf(Pg, 10), f(tszl_imm, 9, 5), rf(Zdn, 0); \ 3464 } 3465 3466 INSN(sve_asr, 0b0000, /* isSHR = */ true); 3467 INSN(sve_lsl, 0b0011, /* isSHR = */ false); 3468 INSN(sve_lsr, 0b0001, /* isSHR = */ true); 3469 #undef INSN 3470 3471 private: 3472 3473 // Scalar base + immediate index 3474 void sve_ld_st1(FloatRegister Zt, Register Xn, int imm, PRegister Pg, 3475 SIMD_RegVariant T, int op1, int type, int op2) { 3476 starti; 3477 assert_cond(T >= type); 3478 f(op1, 31, 25), f(type, 24, 23), f(T, 22, 21); 3479 f(0, 20), sf(imm, 19, 16), f(op2, 15, 13); 3480 pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0); 3481 } 3482 3483 // Scalar base + scalar index 3484 void sve_ld_st1(FloatRegister Zt, Register Xn, Register Xm, PRegister Pg, 3485 SIMD_RegVariant T, int op1, int type, int op2) { 3486 starti; 3487 assert_cond(T >= type); 3488 f(op1, 31, 25), f(type, 24, 23), f(T, 22, 21); 3489 rf(Xm, 16), f(op2, 15, 13); 3490 pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0); 3491 } 3492 3493 void sve_ld_st1(FloatRegister Zt, PRegister Pg, 3494 SIMD_RegVariant T, const Address &a, 3495 int op1, int type, int imm_op2, int scalar_op2) { 3496 switch (a.getMode()) { 3497 case Address::base_plus_offset: 3498 sve_ld_st1(Zt, a.base(), checked_cast<int>(a.offset()), Pg, T, op1, type, imm_op2); 3499 break; 3500 case Address::base_plus_offset_reg: 3501 sve_ld_st1(Zt, a.base(), a.index(), Pg, T, op1, type, scalar_op2); 3502 break; 3503 default: 3504 ShouldNotReachHere(); 3505 } 3506 } 3507 3508 public: 3509 3510 // SVE contiguous load/store 3511 #define INSN(NAME, op1, type, imm_op2, scalar_op2) \ 3512 void NAME(FloatRegister Zt, SIMD_RegVariant T, PRegister Pg, const Address &a) { \ 3513 assert(T != Q, "invalid register variant"); \ 3514 
sve_ld_st1(Zt, Pg, T, a, op1, type, imm_op2, scalar_op2); \ 3515 } 3516 3517 INSN(sve_ld1b, 0b1010010, 0b00, 0b101, 0b010); 3518 INSN(sve_st1b, 0b1110010, 0b00, 0b111, 0b010); 3519 INSN(sve_ld1h, 0b1010010, 0b01, 0b101, 0b010); 3520 INSN(sve_st1h, 0b1110010, 0b01, 0b111, 0b010); 3521 INSN(sve_ld1w, 0b1010010, 0b10, 0b101, 0b010); 3522 INSN(sve_st1w, 0b1110010, 0b10, 0b111, 0b010); 3523 INSN(sve_ld1d, 0b1010010, 0b11, 0b101, 0b010); 3524 INSN(sve_st1d, 0b1110010, 0b11, 0b111, 0b010); 3525 #undef INSN 3526 3527 // Gather/scatter load/store (SVE) - scalar plus vector 3528 #define INSN(NAME, op1, type, op2, op3) \ 3529 void NAME(FloatRegister Zt, PRegister Pg, Register Xn, FloatRegister Zm) { \ 3530 starti; \ 3531 f(op1, 31, 25), f(type, 24, 23), f(op2, 22, 21), rf(Zm, 16); \ 3532 f(op3, 15, 13), pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0); \ 3533 } 3534 // SVE 32-bit gather load words (scalar plus 32-bit scaled offsets) 3535 INSN(sve_ld1w_gather, 0b1000010, 0b10, 0b01, 0b010); 3536 // SVE 64-bit gather load (scalar plus 32-bit unpacked scaled offsets) 3537 INSN(sve_ld1d_gather, 0b1100010, 0b11, 0b01, 0b010); 3538 // SVE 32-bit scatter store (scalar plus 32-bit scaled offsets) 3539 INSN(sve_st1w_scatter, 0b1110010, 0b10, 0b11, 0b100); 3540 // SVE 64-bit scatter store (scalar plus unpacked 32-bit scaled offsets) 3541 INSN(sve_st1d_scatter, 0b1110010, 0b11, 0b01, 0b100); 3542 #undef INSN 3543 3544 // SVE load/store - unpredicated 3545 #define INSN(NAME, op1) \ 3546 void NAME(FloatRegister Zt, const Address &a) { \ 3547 starti; \ 3548 assert(a.index() == noreg, "invalid address variant"); \ 3549 f(op1, 31, 29), f(0b0010110, 28, 22), sf(a.offset() >> 3, 21, 16), \ 3550 f(0b010, 15, 13), f(a.offset() & 0x7, 12, 10), srf(a.base(), 5), rf(Zt, 0); \ 3551 } 3552 3553 INSN(sve_ldr, 0b100); // LDR (vector) 3554 INSN(sve_str, 0b111); // STR (vector) 3555 #undef INSN 3556 3557 // SVE stack frame adjustment 3558 #define INSN(NAME, op) \ 3559 void NAME(Register Xd, Register Xn, int imm6) { \ 
3560 starti; \ 3561 f(0b000001000, 31, 23), f(op, 22, 21); \ 3562 srf(Xn, 16), f(0b01010, 15, 11), sf(imm6, 10, 5), srf(Xd, 0); \ 3563 } 3564 3565 INSN(sve_addvl, 0b01); // Add multiple of vector register size to scalar register 3566 INSN(sve_addpl, 0b11); // Add multiple of predicate register size to scalar register 3567 #undef INSN 3568 3569 // SVE inc/dec register by element count 3570 #define INSN(NAME, op) \ 3571 void NAME(Register Xdn, SIMD_RegVariant T, unsigned imm4 = 1, int pattern = 0b11111) { \ 3572 starti; \ 3573 assert(T != Q, "invalid size"); \ 3574 f(0b00000100,31, 24), f(T, 23, 22), f(0b11, 21, 20); \ 3575 f(imm4 - 1, 19, 16), f(0b11100, 15, 11), f(op, 10), f(pattern, 9, 5), rf(Xdn, 0); \ 3576 } 3577 3578 INSN(sve_inc, 0); 3579 INSN(sve_dec, 1); 3580 #undef INSN 3581 3582 // SVE predicate logical operations 3583 #define INSN(NAME, op1, op2, op3) \ 3584 void NAME(PRegister Pd, PRegister Pg, PRegister Pn, PRegister Pm) { \ 3585 starti; \ 3586 f(0b00100101, 31, 24), f(op1, 23, 22), f(0b00, 21, 20); \ 3587 prf(Pm, 16), f(0b01, 15, 14), prf(Pg, 10), f(op2, 9); \ 3588 prf(Pn, 5), f(op3, 4), prf(Pd, 0); \ 3589 } 3590 3591 INSN(sve_and, 0b00, 0b0, 0b0); 3592 INSN(sve_ands, 0b01, 0b0, 0b0); 3593 INSN(sve_eor, 0b00, 0b1, 0b0); 3594 INSN(sve_eors, 0b01, 0b1, 0b0); 3595 INSN(sve_orr, 0b10, 0b0, 0b0); 3596 INSN(sve_orrs, 0b11, 0b0, 0b0); 3597 INSN(sve_bic, 0b00, 0b0, 0b1); 3598 #undef INSN 3599 3600 // SVE increment register by predicate count 3601 void sve_incp(const Register rd, SIMD_RegVariant T, PRegister pg) { 3602 starti; 3603 assert(T != Q, "invalid size"); 3604 f(0b00100101, 31, 24), f(T, 23, 22), f(0b1011001000100, 21, 9), 3605 prf(pg, 5), rf(rd, 0); 3606 } 3607 3608 // SVE broadcast general-purpose register to vector elements (unpredicated) 3609 void sve_dup(FloatRegister Zd, SIMD_RegVariant T, Register Rn) { 3610 starti; 3611 assert(T != Q, "invalid size"); 3612 f(0b00000101, 31, 24), f(T, 23, 22), f(0b100000001110, 21, 10); 3613 srf(Rn, 5), rf(Zd, 
0); 3614 } 3615 3616 // SVE broadcast signed immediate to vector elements (unpredicated) 3617 void sve_dup(FloatRegister Zd, SIMD_RegVariant T, int imm8) { 3618 starti; 3619 assert(T != Q, "invalid size"); 3620 int sh = 0; 3621 if (imm8 <= 127 && imm8 >= -128) { 3622 sh = 0; 3623 } else if (T != B && imm8 <= 32512 && imm8 >= -32768 && (imm8 & 0xff) == 0) { 3624 sh = 1; 3625 imm8 = (imm8 >> 8); 3626 } else { 3627 guarantee(false, "invalid immediate"); 3628 } 3629 f(0b00100101, 31, 24), f(T, 23, 22), f(0b11100011, 21, 14); 3630 f(sh, 13), sf(imm8, 12, 5), rf(Zd, 0); 3631 } 3632 3633 // SVE predicate test 3634 void sve_ptest(PRegister Pg, PRegister Pn) { 3635 starti; 3636 f(0b001001010101000011, 31, 14), prf(Pg, 10), f(0, 9), prf(Pn, 5), f(0, 4, 0); 3637 } 3638 3639 // SVE predicate initialize 3640 void sve_ptrue(PRegister pd, SIMD_RegVariant esize, int pattern = 0b11111) { 3641 starti; 3642 f(0b00100101, 31, 24), f(esize, 23, 22), f(0b011000111000, 21, 10); 3643 f(pattern, 9, 5), f(0b0, 4), prf(pd, 0); 3644 } 3645 3646 // SVE predicate zero 3647 void sve_pfalse(PRegister pd) { 3648 starti; 3649 f(0b00100101, 31, 24), f(0b00, 23, 22), f(0b011000111001, 21, 10); 3650 f(0b000000, 9, 4), prf(pd, 0); 3651 } 3652 3653 // SVE load/store predicate register 3654 #define INSN(NAME, op1) \ 3655 void NAME(PRegister Pt, const Address &a) { \ 3656 starti; \ 3657 assert(a.index() == noreg, "invalid address variant"); \ 3658 f(op1, 31, 29), f(0b0010110, 28, 22), sf(a.offset() >> 3, 21, 16), \ 3659 f(0b000, 15, 13), f(a.offset() & 0x7, 12, 10), srf(a.base(), 5), \ 3660 f(0, 4), prf(Pt, 0); \ 3661 } 3662 3663 INSN(sve_ldr, 0b100); // LDR (predicate) 3664 INSN(sve_str, 0b111); // STR (predicate) 3665 #undef INSN 3666 3667 // SVE move predicate register 3668 void sve_mov(PRegister Pd, PRegister Pn) { 3669 starti; 3670 f(0b001001011000, 31, 20), prf(Pn, 16), f(0b01, 15, 14), prf(Pn, 10); 3671 f(0, 9), prf(Pn, 5), f(0, 4), prf(Pd, 0); 3672 } 3673 3674 // SVE copy general-purpose register 
to vector elements (predicated) 3675 void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, Register Rn) { 3676 starti; 3677 assert(T != Q, "invalid size"); 3678 f(0b00000101, 31, 24), f(T, 23, 22), f(0b101000101, 21, 13); 3679 pgrf(Pg, 10), srf(Rn, 5), rf(Zd, 0); 3680 } 3681 3682 private: 3683 void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8, 3684 bool isMerge, bool isFloat) { 3685 starti; 3686 assert(T != Q, "invalid size"); 3687 int sh = 0; 3688 if (imm8 <= 127 && imm8 >= -128) { 3689 sh = 0; 3690 } else if (T != B && imm8 <= 32512 && imm8 >= -32768 && (imm8 & 0xff) == 0) { 3691 sh = 1; 3692 imm8 = (imm8 >> 8); 3693 } else { 3694 guarantee(false, "invalid immediate"); 3695 } 3696 int m = isMerge ? 1 : 0; 3697 f(0b00000101, 31, 24), f(T, 23, 22), f(0b01, 21, 20); 3698 prf(Pg, 16), f(isFloat ? 1 : 0, 15), f(m, 14), f(sh, 13), sf(imm8, 12, 5), rf(Zd, 0); 3699 } 3700 3701 public: 3702 // SVE copy signed integer immediate to vector elements (predicated) 3703 void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8, bool isMerge) { 3704 sve_cpy(Zd, T, Pg, imm8, isMerge, /*isFloat*/false); 3705 } 3706 // SVE copy floating-point immediate to vector elements (predicated) 3707 void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, double d) { 3708 sve_cpy(Zd, T, Pg, checked_cast<int8_t>(pack(d)), /*isMerge*/true, /*isFloat*/true); 3709 } 3710 3711 // SVE conditionally select elements from two vectors 3712 void sve_sel(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, 3713 FloatRegister Zn, FloatRegister Zm) { 3714 starti; 3715 assert(T != Q, "invalid size"); 3716 f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16); 3717 f(0b11, 15, 14), prf(Pg, 10), rf(Zn, 5), rf(Zd, 0); 3718 } 3719 3720 // SVE Permute Vector - Extract 3721 void sve_ext(FloatRegister Zdn, FloatRegister Zm, int imm8) { 3722 starti; 3723 f(0b00000101001, 31, 21), f(imm8 >> 3, 20, 16), f(0b000, 15, 13); 3724 f(imm8 & 0b111, 12, 10), rf(Zm, 
5), rf(Zdn, 0); 3725 } 3726 3727 // SVE Integer/Floating-Point Compare - Vectors 3728 #define INSN(NAME, op1, op2, fp) \ 3729 void NAME(Condition cond, PRegister Pd, SIMD_RegVariant T, PRegister Pg, \ 3730 FloatRegister Zn, FloatRegister Zm) { \ 3731 starti; \ 3732 assert(T != Q, "invalid size"); \ 3733 bool is_absolute = op2 == 0b11; \ 3734 if (fp == 1) { \ 3735 assert(T != B, "invalid size"); \ 3736 if (is_absolute) { \ 3737 assert(cond == GT || cond == GE, "invalid condition for fac"); \ 3738 } else { \ 3739 assert(cond != HI && cond != HS, "invalid condition for fcm"); \ 3740 } \ 3741 } \ 3742 int cond_op; \ 3743 switch(cond) { \ 3744 case EQ: cond_op = (op2 << 2) | 0b10; break; \ 3745 case NE: cond_op = (op2 << 2) | 0b11; break; \ 3746 case GE: cond_op = (op2 << 2) | (is_absolute ? 0b01 : 0b00); break; \ 3747 case GT: cond_op = (op2 << 2) | (is_absolute ? 0b11 : 0b01); break; \ 3748 case HI: cond_op = 0b0001; break; \ 3749 case HS: cond_op = 0b0000; break; \ 3750 default: \ 3751 ShouldNotReachHere(); \ 3752 } \ 3753 f(op1, 31, 24), f(T, 23, 22), f(0, 21), rf(Zm, 16), f((cond_op >> 1) & 7, 15, 13); \ 3754 pgrf(Pg, 10), rf(Zn, 5), f(cond_op & 1, 4), prf(Pd, 0); \ 3755 } 3756 3757 INSN(sve_cmp, 0b00100100, 0b10, 0); // Integer compare vectors 3758 INSN(sve_fcm, 0b01100101, 0b01, 1); // Floating-point compare vectors 3759 INSN(sve_fac, 0b01100101, 0b11, 1); // Floating-point absolute compare vectors 3760 #undef INSN 3761 3762 private: 3763 // Convert Assembler::Condition to op encoding - used by sve integer compare encoding 3764 static int assembler_cond_to_sve_op(Condition cond, bool &is_unsigned) { 3765 if (cond == HI || cond == HS || cond == LO || cond == LS) { 3766 is_unsigned = true; 3767 } else { 3768 is_unsigned = false; 3769 } 3770 3771 switch (cond) { 3772 case HI: 3773 case GT: 3774 return 0b0001; 3775 case HS: 3776 case GE: 3777 return 0b0000; 3778 case LO: 3779 case LT: 3780 return 0b0010; 3781 case LS: 3782 case LE: 3783 return 0b0011; 3784 case EQ: 
3785 return 0b1000; 3786 case NE: 3787 return 0b1001; 3788 default: 3789 ShouldNotReachHere(); 3790 return -1; 3791 } 3792 } 3793 3794 public: 3795 // SVE Integer Compare - 5 bits signed imm and 7 bits unsigned imm 3796 void sve_cmp(Condition cond, PRegister Pd, SIMD_RegVariant T, 3797 PRegister Pg, FloatRegister Zn, int imm) { 3798 starti; 3799 assert(T != Q, "invalid size"); 3800 bool is_unsigned = false; 3801 int cond_op = assembler_cond_to_sve_op(cond, is_unsigned); 3802 f(is_unsigned ? 0b00100100 : 0b00100101, 31, 24), f(T, 23, 22); 3803 f(is_unsigned ? 0b1 : 0b0, 21); 3804 if (is_unsigned) { 3805 f(imm, 20, 14), f((cond_op >> 1) & 0x1, 13); 3806 } else { 3807 sf(imm, 20, 16), f((cond_op >> 1) & 0x7, 15, 13); 3808 } 3809 pgrf(Pg, 10), rf(Zn, 5), f(cond_op & 0x1, 4), prf(Pd, 0); 3810 } 3811 3812 // SVE Floating-point compare vector with zero 3813 void sve_fcm(Condition cond, PRegister Pd, SIMD_RegVariant T, 3814 PRegister Pg, FloatRegister Zn, double d) { 3815 starti; 3816 assert(T != Q, "invalid size"); 3817 guarantee(d == 0.0, "invalid immediate"); 3818 int cond_op; 3819 switch(cond) { 3820 case EQ: cond_op = 0b100; break; 3821 case GT: cond_op = 0b001; break; 3822 case GE: cond_op = 0b000; break; 3823 case LT: cond_op = 0b010; break; 3824 case LE: cond_op = 0b011; break; 3825 case NE: cond_op = 0b110; break; 3826 default: 3827 ShouldNotReachHere(); 3828 } 3829 f(0b01100101, 31, 24), f(T, 23, 22), f(0b0100, 21, 18), 3830 f((cond_op >> 1) & 0x3, 17, 16), f(0b001, 15, 13), 3831 pgrf(Pg, 10), rf(Zn, 5); 3832 f(cond_op & 0x1, 4), prf(Pd, 0); 3833 } 3834 3835 // SVE unpack vector elements 3836 protected: 3837 void _sve_xunpk(bool is_unsigned, bool is_high, FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn) { 3838 starti; 3839 assert(T != B && T != Q, "invalid size"); 3840 f(0b00000101, 31, 24), f(T, 23, 22), f(0b1100, 21, 18); 3841 f(is_unsigned ? 1 : 0, 17), f(is_high ? 
                                      1 : 0, 16), f(0b001110, 15, 10), rf(Zn, 5), rf(Zd, 0);
  }

public:

  // SVE unpack and extend half of vector. is_unsigned selects zero- vs
  // sign-extension, is_high selects which half of Zn is widened.
#define INSN(NAME, is_unsigned, is_high)                                 \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn) {     \
    _sve_xunpk(is_unsigned, is_high, Zd, T, Zn);                         \
  }

  INSN(sve_uunpkhi, true, true ); // Unsigned unpack and extend half of vector - high half
  INSN(sve_uunpklo, true, false); // Unsigned unpack and extend half of vector - low half
  INSN(sve_sunpkhi, false, true ); // Signed unpack and extend half of vector - high half
  INSN(sve_sunpklo, false, false); // Signed unpack and extend half of vector - low half
#undef INSN

  // SVE unpack predicate elements: widen one half of predicate Pn into Pd.
#define INSN(NAME, op)                                              \
  void NAME(PRegister Pd, PRegister Pn) {                           \
    starti;                                                         \
    f(0b000001010011000, 31, 17), f(op, 16), f(0b0100000, 15, 9);   \
    prf(Pn, 5), f(0b0, 4), prf(Pd, 0);                              \
  }

  INSN(sve_punpkhi, 0b1); // Unpack and widen high half of predicate
  INSN(sve_punpklo, 0b0); // Unpack and widen low half of predicate
#undef INSN

  // SVE permute vector elements (concatenate even/odd elements of Zn:Zm).
#define INSN(NAME, op)                                                                  \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {  \
    starti;                                                                             \
    assert(T != Q, "invalid size");                                                     \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);                        \
    f(0b01101, 15, 11), f(op, 10), rf(Zn, 5), rf(Zd, 0);                                \
  }

  INSN(sve_uzp1, 0b0); // Concatenate even elements from two vectors
  INSN(sve_uzp2, 0b1); // Concatenate odd elements from two vectors
#undef INSN

  // SVE permute predicate elements (predicate form of uzp1/uzp2 above).
#define INSN(NAME, op)                                                           \
  void NAME(PRegister Pd, SIMD_RegVariant T, PRegister Pn, PRegister Pm) {       \
    starti;                                                                      \
    assert(T != Q, "invalid size");                                              \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b10, 21, 20), prf(Pm, 16);           \
    f(0b01001, 15, 11), f(op, 10), f(0b0, 9), prf(Pn, 5), f(0b0, 4), prf(Pd, 0); \
  }

  INSN(sve_uzp1, 0b0); // Concatenate even elements from two predicates
  INSN(sve_uzp2, 0b1); // Concatenate odd elements from two predicates
#undef INSN

  // SVE integer compare scalar count and limit. Generates a predicate in Pd
  // by comparing an incrementing value starting at Rn against the limit Rm.
  // sf selects the 32-bit (0) or 64-bit (1) scalar form; op encodes the
  // comparison condition (bit 0 of op lands in instruction bit 4).
#define INSN(NAME, sf, op)                                                 \
  void NAME(PRegister Pd, SIMD_RegVariant T, Register Rn, Register Rm) {   \
    starti;                                                                \
    assert(T != Q, "invalid register variant");                            \
    f(0b00100101, 31, 24), f(T, 23, 22), f(1, 21),                         \
    zrf(Rm, 16), f(0, 15, 13), f(sf, 12), f(op >> 1, 11, 10),              \
    zrf(Rn, 5), f(op & 1, 4), prf(Pd, 0);                                  \
  }
  // While incrementing signed scalar less than scalar
  INSN(sve_whileltw, 0b0, 0b010);
  INSN(sve_whilelt, 0b1, 0b010);
  // While incrementing signed scalar less than or equal to scalar
  INSN(sve_whilelew, 0b0, 0b011);
  INSN(sve_whilele, 0b1, 0b011);
  // While incrementing unsigned scalar lower than scalar
  INSN(sve_whilelow, 0b0, 0b110);
  INSN(sve_whilelo, 0b1, 0b110);
  // While incrementing unsigned scalar lower than or the same as scalar
  INSN(sve_whilelsw, 0b0, 0b111);
  INSN(sve_whilels, 0b1, 0b111);
#undef INSN

  // SVE predicate reverse: reverse the order of elements in predicate Pn.
  void sve_rev(PRegister Pd, SIMD_RegVariant T, PRegister Pn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1101000100000, 21, 9);
    prf(Pn, 5), f(0, 4), prf(Pd, 0);
  }

  // SVE partition break condition. isMerge selects the merging (M) vs
  // zeroing (Z) form, encoded in bit 4.
#define INSN(NAME, op)                                                    \
  void NAME(PRegister Pd, PRegister Pg, PRegister Pn, bool isMerge) {     \
    starti;                                                               \
    f(0b00100101, 31, 24), f(op, 23, 22), f(0b01000001, 21, 14);          \
    prf(Pg, 10), f(0b0, 9), prf(Pn, 5), f(isMerge ? 1 : 0, 4), prf(Pd, 0); \
  }

  INSN(sve_brka, 0b00); // Break after first true condition
  INSN(sve_brkb, 0b10); // Break before first true condition
#undef INSN

  // Element count and increment scalar (SVE).
  // imm4 is a multiplier in [1, 16] (encoded as imm4 - 1); pattern defaults
  // to 0b11111 (ALL - all elements available in the vector length).
#define INSN(NAME, TYPE)                                                               \
  void NAME(Register Xdn, unsigned imm4 = 1, int pattern = 0b11111) {                  \
    starti;                                                                            \
    f(0b00000100, 31, 24), f(TYPE, 23, 22), f(0b10, 21, 20);                           \
    f(imm4 - 1, 19, 16), f(0b11100, 15, 11), f(0, 10), f(pattern, 9, 5), rf(Xdn, 0);   \
  }

  INSN(sve_cntb, B); // Set scalar to multiple of 8-bit predicate constraint element count
  INSN(sve_cnth, H); // Set scalar to multiple of 16-bit predicate constraint element count
  INSN(sve_cntw, S); // Set scalar to multiple of 32-bit predicate constraint element count
  INSN(sve_cntd, D); // Set scalar to multiple of 64-bit predicate constraint element count
#undef INSN

  // Set scalar Xd to the count of active (true) elements in predicate Pn,
  // under governing predicate Pg.
  void sve_cntp(Register Xd, SIMD_RegVariant T, PRegister Pg, PRegister Pn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b10000010, 21, 14);
    prf(Pg, 10), f(0, 9), prf(Pn, 5), rf(Xd, 0);
  }

  // SVE convert signed integer to floating-point (predicated)
  void sve_scvtf(FloatRegister Zd, SIMD_RegVariant T_dst, PRegister Pg,
                 FloatRegister Zn, SIMD_RegVariant T_src) {
    starti;
    assert(T_src != B && T_dst != B && T_src != Q && T_dst != Q &&
           (T_src != H || T_dst == T_src), "invalid register variant");
    int opc = T_dst;
    int opc2 = T_src;
    // In most cases we can treat T_dst, T_src as opc, opc2,
    // except for the following two combinations.
    // +-----+------+---+------------------------------------+
    // | opc | opc2 | U |        Instruction Details         |
    // +-----+------+---+------------------------------------+
    // |  11 |  00  | 0 | SCVTF - 32-bit to double-precision |
    // |  11 |  10  | 0 | SCVTF - 64-bit to single-precision |
    // +-----+------+---+------------------------------------+
    if (T_src == S && T_dst == D) {
      opc = 0b11;
      opc2 = 0b00;
    } else if (T_src == D && T_dst == S) {
      opc = 0b11;
      opc2 = 0b10;
    }
    f(0b01100101, 31, 24), f(opc, 23, 22), f(0b010, 21, 19);
    f(opc2, 18, 17), f(0b0101, 16, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }

  // SVE floating-point convert to signed integer, rounding toward zero (predicated)
  void sve_fcvtzs(FloatRegister Zd, SIMD_RegVariant T_dst, PRegister Pg,
                  FloatRegister Zn, SIMD_RegVariant T_src) {
    starti;
    assert(T_src != B && T_dst != B && T_src != Q && T_dst != Q &&
           (T_dst != H || T_src == H), "invalid register variant");
    int opc = T_src;
    int opc2 = T_dst;
    // In most cases we can treat T_src, T_dst as opc, opc2,
    // except for the following two combinations.
    // +-----+------+---+-------------------------------------+
    // | opc | opc2 | U |         Instruction Details         |
    // +-----+------+---+-------------------------------------+
    // |  11 |  10  | 0 | FCVTZS - single-precision to 64-bit |
    // |  11 |  00  | 0 | FCVTZS - double-precision to 32-bit |
    // +-----+------+---+-------------------------------------+
    if (T_src == S && T_dst == D) {
      opc = 0b11;
      opc2 = 0b10;
    } else if (T_src == D && T_dst == S) {
      opc = 0b11;
      opc2 = 0b00;
    }
    f(0b01100101, 31, 24), f(opc, 23, 22), f(0b011, 21, 19);
    f(opc2, 18, 17), f(0b0101, 16, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }

  // SVE floating-point convert precision (predicated)
  void sve_fcvt(FloatRegister Zd, SIMD_RegVariant T_dst, PRegister Pg,
                FloatRegister Zn, SIMD_RegVariant T_src) {
    starti;
    assert(T_src != B && T_dst != B && T_src != Q && T_dst != Q &&
           T_src != T_dst, "invalid register variant");
    // The encodings of fields op1 (bits 17-16) and op2 (bits 23-22)
    // depend on T_src and T_dst as given below -
    // +-----+-----+---------------------------------------------+
    // | op2 | op1 |             Instruction Details             |
    // +-----+-----+---------------------------------------------+
    // |  10 |  01 | FCVT - half-precision to single-precision   |
    // |  11 |  01 | FCVT - half-precision to double-precision   |
    // |  10 |  00 | FCVT - single-precision to half-precision   |
    // |  11 |  11 | FCVT - single-precision to double-precision |
    // |  11 |  00 | FCVT - double-precision to half-precision   |
    // |  11 |  10 | FCVT - double-precision to single-precision |
    // +-----+-----+---------------------------------------------+
    int op1 = 0b00;
    int op2 = (T_src == D || T_dst == D) ? 0b11 : 0b10;
    if (T_src == H) {
      op1 = 0b01;
    } else if (T_dst == S) {
      op1 = 0b10;
    } else if (T_dst == D) {
      op1 = 0b11;
    }
    f(0b01100101, 31, 24), f(op2, 23, 22), f(0b0010, 21, 18);
    f(op1, 17, 16), f(0b101, 15, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }

  // SVE extract element to general-purpose register.
  // before == 0 (lasta): element after the last active one;
  // before == 1 (lastb): the last active element itself.
#define INSN(NAME, before)                                                     \
  void NAME(Register Rd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) {  \
    starti;                                                                    \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b10000, 21, 17);                   \
    f(before, 16), f(0b101, 15, 13);                                           \
    pgrf(Pg, 10), rf(Zn, 5), rf(Rd, 0);                                        \
  }

  INSN(sve_lasta, 0b0);
  INSN(sve_lastb, 0b1);
#undef INSN

  // SVE extract element to SIMD&FP scalar register (same lasta/lastb
  // semantics as the general-purpose form above).
#define INSN(NAME, before)                                                          \
  void NAME(FloatRegister Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) {  \
    starti;                                                                         \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b10001, 21, 17);                        \
    f(before, 16), f(0b100, 15, 13);                                                \
    pgrf(Pg, 10), rf(Zn, 5), rf(Vd, 0);                                             \
  }

  INSN(sve_lasta, 0b0);
  INSN(sve_lastb, 0b1);
#undef INSN

  // SVE reverse within elements. cond restricts the element sizes that are
  // architecturally valid for each opcode.
#define INSN(NAME, opc, cond)                                                       \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) {  \
    starti;                                                                         \
    assert(cond, "invalid size");                                                   \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1001, 21, 18), f(opc, 17, 16);         \
    f(0b100, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);                           \
  }

  INSN(sve_revb, 0b00, T == H || T == S || T == D);
  INSN(sve_rbit, 0b11, T != Q);
#undef INSN

  // SVE Create index starting from general-purpose register and incremented
  // by immediate. imm is a signed 5-bit value (encoded via sf()).
  void sve_index(FloatRegister Zd, SIMD_RegVariant T, Register Rn, int imm) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
    sf(imm, 20, 16), f(0b010001, 15, 10);
    rf(Rn, 5), rf(Zd, 0);
  }
// SVE create index starting from and incremented by immediate:
  // Zd = { imm1, imm1 + imm2, imm1 + 2*imm2, ... }. Both immediates are
  // signed 5-bit values (encoded via sf()).
  void sve_index(FloatRegister Zd, SIMD_RegVariant T, int imm1, int imm2) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
    sf(imm2, 20, 16), f(0b010000, 15, 10);
    sf(imm1, 9, 5), rf(Zd, 0);
  }

  // SVE programmable table lookup/permute using vector of element indices:
  // Zd = table lookup in Zn using the indices held in Zm.
  void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
    f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);
  }

  // Shuffle active elements of vector to the right and fill with zero.
  // Only word and doubleword element sizes are architecturally valid.
  void sve_compact(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, PRegister Pg) {
    starti;
    assert(T == S || T == D, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b100001100, 21, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }

  // SVE2 Count matching elements in vector (HISTCNT).
  void sve_histcnt(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg,
                   FloatRegister Zn, FloatRegister Zm) {
    starti;
    assert(T == S || T == D, "invalid size");
    f(0b01000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
    f(0b110, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }

  // SVE2 bitwise permute (bit extract/deposit using mask in Zm).
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {  \
    starti;                                                                             \
    assert(T != Q, "invalid size");                                                     \
    f(0b01000101, 31, 24), f(T, 23, 22), f(0b0, 21);                                    \
    rf(Zm, 16), f(0b1011, 15, 12), f(opc, 11, 10);                                      \
    rf(Zn, 5), rf(Zd, 0);                                                               \
  }

  INSN(sve_bext, 0b00);
  INSN(sve_bdep, 0b01);
#undef INSN

  // SVE2 bitwise ternary operations. Zdn is both first source and
  // destination (destructive form).
#define INSN(NAME, opc)                                                \
  void NAME(FloatRegister Zdn, FloatRegister Zm, FloatRegister Zk) {   \
    starti;                                                            \
    f(0b00000100, 31, 24), f(opc, 23, 21), rf(Zm, 16);                 \
    f(0b001110, 15, 10), rf(Zk, 5), rf(Zdn, 0);                        \
  }

  INSN(sve_eor3, 0b001); // Bitwise exclusive OR of three vectors
#undef INSN

  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
  }

  // Stack overflow checking
  virtual void bang_stack_with_offset(int offset);

  // Immediate-operand validity checks used by callers before selecting an
  // instruction form.
  static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
  static bool operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm);
  static bool operand_valid_for_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_sve_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_float_immediate(double imm);
  static int operand_valid_for_movi_immediate(uint64_t imm64, SIMD_Arrangement T);

  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
};

// Combine memory barrier mask bits (e.g. LoadLoad | LoadStore).
inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
                                             Assembler::Membar_mask_bits b) {
  return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
}

// The destructor emits the accumulated instruction word into the code
// buffer and asserts that every bit position was assigned exactly once.
Instruction_aarch64::~Instruction_aarch64() {
  assem->emit_int32(insn);
  assert_cond(get_bits() == 0xffffffff);
}

#undef f
#undef sf
#undef rf
#undef srf
#undef zrf
#undef prf
#undef pgrf
#undef fixed

#undef starti

// Invert a condition: flipping the low bit of an AArch64 condition code
// yields its logical inverse (EQ<->NE, LT<->GE, etc.).
inline Assembler::Condition operator~(const Assembler::Condition cond) {
  return Assembler::Condition(int(cond) ^ 1);
}

// Debug helper: disassemble `len` instructions starting at `start`.
extern "C" void das(uint64_t start, int len);

#endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP