1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_ASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_ASSEMBLER_AARCH64_HPP
28
29 #include "asm/register.hpp"
30 #include "cppstdlib/type_traits.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "utilities/checkedCast.hpp"
33 #include "utilities/debug.hpp"
34 #include "utilities/globalDefinitions.hpp"
35 #include "utilities/macros.hpp"
36
37 #ifdef __GNUC__
38
39 // __nop needs volatile so that compiler doesn't optimize it away
40 #define NOP() asm volatile ("nop");
41
42 #elif defined(_MSC_VER)
43
44 // Use MSVC intrinsic: https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019#I
45 #define NOP() __nop();
46
47 #endif
48
49
50 // definitions of various symbolic names for machine registers
51
52 // First intercalls between C and Java which use 8 general registers
53 // and 8 floating registers
54
55 // we also have to copy between x86 and ARM registers but that's a
56 // secondary complication -- not all code employing C call convention
57 // executes as x86 code though -- we generate some of it
58
// Register counts for the C (AAPCS64) and Java calling conventions.
// Both conventions pass up to eight integer and eight floating-point
// arguments in registers; see the register tables below.
class Argument {
 public:
  enum {
    n_int_register_parameters_c   = 8,  // r0, r1, ... r7 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8,  // v0, v1, ... v7 (c_farg0, c_farg1, ... )

    n_int_register_parameters_j   = 8,  // r1, ... r7, r0 (j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8   // v0, v1, ... v7 (j_farg0, j_farg1, ...
  };
};
69
// C calling convention: integer arguments in r0..r7, in order.
constexpr Register c_rarg0 = r0;
constexpr Register c_rarg1 = r1;
constexpr Register c_rarg2 = r2;
constexpr Register c_rarg3 = r3;
constexpr Register c_rarg4 = r4;
constexpr Register c_rarg5 = r5;
constexpr Register c_rarg6 = r6;
constexpr Register c_rarg7 = r7;

// C calling convention: floating-point arguments in v0..v7, in order.
constexpr FloatRegister c_farg0 = v0;
constexpr FloatRegister c_farg1 = v1;
constexpr FloatRegister c_farg2 = v2;
constexpr FloatRegister c_farg3 = v3;
constexpr FloatRegister c_farg4 = v4;
constexpr FloatRegister c_farg5 = v5;
constexpr FloatRegister c_farg6 = v6;
constexpr FloatRegister c_farg7 = v7;
87
88 // Symbolically name the register arguments used by the Java calling convention.
89 // We have control over the convention for java so we can do what we please.
90 // What pleases us is to offset the java calling convention so that when
91 // we call a suitable jni method the arguments are lined up and we don't
92 // have to do much shuffling. A suitable jni method is non-static and a
93 // small number of arguments
94 //
95 // |--------------------------------------------------------------------|
96 // | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 c_rarg6 c_rarg7 |
97 // |--------------------------------------------------------------------|
98 // | r0 r1 r2 r3 r4 r5 r6 r7 |
99 // |--------------------------------------------------------------------|
100 // | j_rarg7 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 j_rarg5 j_rarg6 |
101 // |--------------------------------------------------------------------|
102
103
// Java integer arguments are rotated by one relative to the C
// convention (see the table above), so that a call to a non-static JNI
// method needs minimal argument shuffling.
constexpr Register j_rarg0 = c_rarg1;
constexpr Register j_rarg1 = c_rarg2;
constexpr Register j_rarg2 = c_rarg3;
constexpr Register j_rarg3 = c_rarg4;
constexpr Register j_rarg4 = c_rarg5;
constexpr Register j_rarg5 = c_rarg6;
constexpr Register j_rarg6 = c_rarg7;
constexpr Register j_rarg7 = c_rarg0;

// Java floating args are passed as per C

constexpr FloatRegister j_farg0 = v0;
constexpr FloatRegister j_farg1 = v1;
constexpr FloatRegister j_farg2 = v2;
constexpr FloatRegister j_farg3 = v3;
constexpr FloatRegister j_farg4 = v4;
constexpr FloatRegister j_farg5 = v5;
constexpr FloatRegister j_farg6 = v6;
constexpr FloatRegister j_farg7 = v7;
123
124 // registers used to hold VM data either temporarily within a method
125 // or across method calls
126
127 // volatile (caller-save) registers
128
129 // r8 is used for indirect result location return
130 // we use it and r9 as scratch registers
// r8 is used for indirect result location return
// we use it and r9 as scratch registers
constexpr Register rscratch1 = r8;
constexpr Register rscratch2 = r9;

// current method -- must be in a call-clobbered register
constexpr Register rmethod = r12;

// non-volatile (callee-save) registers are r16-29
// of which the following are dedicated global state

constexpr Register lr            = r30; // link register
constexpr Register rfp           = r29; // frame pointer
constexpr Register rthread       = r28; // current thread
constexpr Register rheapbase     = r27; // base of heap
constexpr Register rcpool        = r26; // constant pool cache
constexpr Register rlocals       = r24; // locals on stack
constexpr Register rbcp          = r22; // bytecode pointer
constexpr Register rdispatch     = r21; // dispatch table base
constexpr Register esp           = r20; // Java expression stack pointer
constexpr Register r19_sender_sp = r19; // sender's SP while in interpreter

// Preserved predicate register with all elements set TRUE.
constexpr PRegister ptrue = p7;
153
154 #define assert_cond(ARG1) assert(ARG1, #ARG1)
155
namespace asm_util {
  // Encode imm as an AArch64 "logical immediate" bitmask pattern
  // (N:immr:imms); is32 selects the 32-bit variant.
  uint32_t encode_logical_immediate(bool is32, uint64_t imm);
  // Same encoding for SVE logical-immediate instructions, parameterized
  // by the element width in bits.
  uint32_t encode_sve_logical_immediate(unsigned elembits, uint64_t imm);
  // True if imm fits in an nbits-wide signed immediate field.
  bool operand_valid_for_immediate_bits(int64_t imm, unsigned nbits);
};
161
162 using namespace asm_util;
163
164
165 class Assembler;
166
// Accumulates the bit fields of a single 32-bit AArch64 instruction.
// Fields are OR-ed into 'insn' via f()/sf() and the register-field
// helpers; the destructor (defined elsewhere) hands the finished word
// to the owning Assembler. In ASSERT builds, 'bits' records which bit
// positions have already been written so that double-writes (in f())
// and reads of never-written fields (in get()) are caught.
class Instruction_aarch64 {
  unsigned insn;
#ifdef ASSERT
  unsigned bits;
#endif
  Assembler *assem;

public:

  Instruction_aarch64(class Assembler *as) {
#ifdef ASSERT
    bits = 0;
#endif
    insn = 0;
    assem = as;
  }

  inline ~Instruction_aarch64();

  unsigned &get_insn() { return insn; }
#ifdef ASSERT
  unsigned &get_bits() { return bits; }
#endif

  // Sign-extend the field val[hi:lo] to 32 bits: move the field's top
  // bit up to bit 31 via the union, then arithmetic-shift back down.
  static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) {
    union {
      unsigned u;
      int n;
    };

    u = val << (31 - hi);
    n = n >> (31 - hi + lo);
    return n;
  }

  // Extract the unsigned bit field val[msb:lsb].
  static inline uint32_t extract(uint32_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    assert_cond(msb >= lsb);
    uint32_t mask = checked_cast<uint32_t>(right_n_bits(nbits));
    uint32_t result = val >> lsb;
    result &= mask;
    return result;
  }

  // Extract the bit field val[msb:lsb] and sign-extend it.
  static inline int32_t sextract(uint32_t val, int msb, int lsb) {
    uint32_t uval = extract(val, msb, lsb);
    return extend(uval, msb - lsb);
  }

  // Overwrite the unsigned field [msb:lsb] of the instruction word at
  // address a with val, leaving the other bits untouched.
  static ALWAYSINLINE void patch(address a, int msb, int lsb, uint64_t val) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1ULL << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    val <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= (unsigned)val;
    *(unsigned *)a = target;
  }

  // As patch(), but val is signed: it must sign-extend from nbits.
  static void spatch(address a, int msb, int lsb, int64_t val) {
    int nbits = msb - lsb + 1;
    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn at " INTPTR_FORMAT, p2i(a));
    uint64_t uval = val;
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    uval &= mask;
    uval <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= (unsigned)uval;
    *(unsigned *)a = target;
  }

  // Set the unsigned field [msb:lsb] of the instruction being built.
  // Each bit position may only be written once (checked in ASSERT).
  void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1ULL << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    val <<= lsb;
    insn |= val;
#ifdef ASSERT
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    mask <<= lsb;
    assert_cond((bits & mask) == 0);
    bits |= mask;
#endif
  }

  // Set a single bit.
  void f(unsigned val, int bit) {
    f(val, bit, bit);
  }

  // Set the signed field [msb:lsb]; val must sign-extend from the
  // field's width.
  void sf(int64_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    uint64_t uval = val;
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
    uval &= mask;
    f((unsigned)uval, lsb + nbits - 1, lsb);
  }

  // Set a 5-bit general register field at lsb.
  void rf(Register r, int lsb) {
    f(r->raw_encoding(), lsb + 4, lsb);
  }

  // reg|ZR
  void zrf(Register r, int lsb) {
    f(r->raw_encoding() - (r == zr), lsb + 4, lsb);
  }

  // reg|SP
  void srf(Register r, int lsb) {
    f(r == sp ? 31 : r->raw_encoding(), lsb + 4, lsb);
  }

  // 5-bit FP/SIMD register field at lsb.
  void rf(FloatRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 4, lsb);
  }

  //<0-15>reg: As `rf(FloatRegister)`, but only the lower 16 FloatRegisters are allowed.
  void lrf(FloatRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 3, lsb);
  }

  // 4-bit SVE predicate register field at lsb.
  void prf(PRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 3, lsb);
  }

  // 3-bit SVE governing-predicate field at lsb.
  void pgrf(PRegister r, int lsb) {
    f(r->raw_encoding(), lsb + 2, lsb);
  }

  // Read back the field [msb:lsb] of the instruction being built; in
  // ASSERT builds, every requested bit must already have been written.
  unsigned get(int msb = 31, int lsb = 0) {
    int nbits = msb - lsb + 1;
    unsigned mask = checked_cast<unsigned>(right_n_bits(nbits)) << lsb;
    assert_cond((bits & mask) == mask);
    return (insn & mask) >> lsb;
  }
};
310
311 #define starti Instruction_aarch64 current_insn(this);
312
313 class PrePost {
314 int _offset;
315 Register _r;
316 protected:
317 PrePost(Register reg, int o) : _offset(o), _r(reg) { }
318 ~PrePost() = default;
319 PrePost(const PrePost&) = default;
320 PrePost& operator=(const PrePost&) = default;
321 public:
322 int offset() const { return _offset; }
323 Register reg() const { return _r; }
324 };
325
326 class Pre : public PrePost {
327 public:
328 Pre(Register reg, int o) : PrePost(reg, o) { }
329 };
330
331 class Post : public PrePost {
332 Register _idx;
333 bool _is_postreg;
334 public:
335 Post(Register reg, int o) : PrePost(reg, o), _idx(noreg), _is_postreg(false) {}
336 Post(Register reg, Register idx) : PrePost(reg, 0), _idx(idx), _is_postreg(true) {}
337 Register idx_reg() const { return _idx; }
338 bool is_postreg() const { return _is_postreg; }
339 };
340
// Operand extend/shift operations used by extended-register operands
// and register-offset addressing (unsigned/signed extend of byte,
// halfword, word, or doubleword).
namespace ext
{
  enum operation { uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx };
};
345
346 // Addressing modes
347 class Address {
348 public:
349
350 enum mode { no_mode, base_plus_offset, pre, post, post_reg,
351 base_plus_offset_reg, literal };
352
353 // Shift and extend for base reg + reg offset addressing
354 class extend {
355 int _option, _shift;
356 ext::operation _op;
357 public:
358 extend() { }
359 extend(int s, int o, ext::operation op) : _option(o), _shift(s), _op(op) { }
360 int option() const{ return _option; }
361 int shift() const { return _shift; }
362 ext::operation op() const { return _op; }
363 };
364
365 static extend uxtw(int shift = -1) { return extend(shift, 0b010, ext::uxtw); }
366 static extend lsl(int shift = -1) { return extend(shift, 0b011, ext::uxtx); }
367 static extend sxtw(int shift = -1) { return extend(shift, 0b110, ext::sxtw); }
368 static extend sxtx(int shift = -1) { return extend(shift, 0b111, ext::sxtx); }
369
370 private:
371 struct Nonliteral {
372 Nonliteral(Register base, Register index, int64_t offset, extend ext = extend())
373 : _base(base), _index(index), _offset(offset), _ext(ext) {}
374 Register _base;
375 Register _index;
376 int64_t _offset;
377 extend _ext;
378 };
379
380 struct Literal {
381 Literal(address target, const RelocationHolder& rspec)
382 : _target(target), _rspec(rspec) {}
383
384 // If the target is far we'll need to load the ea of this to a
385 // register to reach it. Otherwise if near we can do PC-relative
386 // addressing.
387 address _target;
388
389 RelocationHolder _rspec;
390 };
391
392 void assert_is_nonliteral() const NOT_DEBUG_RETURN;
393 void assert_is_literal() const NOT_DEBUG_RETURN;
394
395 // Discriminated union, based on _mode.
396 // - no_mode: uses dummy _nonliteral, for ease of copying.
397 // - literal: only _literal is used.
398 // - others: only _nonliteral is used.
399 enum mode _mode;
400 union {
401 Nonliteral _nonliteral;
402 Literal _literal;
403 };
404
405 // Helper for copy constructor and assignment operator.
406 // Copy mode-relevant part of a into this.
407 void copy_data(const Address& a) {
408 assert(_mode == a._mode, "precondition");
409 if (_mode == literal) {
410 new (&_literal) Literal(a._literal);
411 } else {
412 // non-literal mode or no_mode.
413 new (&_nonliteral) Nonliteral(a._nonliteral);
414 }
415 }
416
417 public:
418 // no_mode initializes _nonliteral for ease of copying.
419 Address() :
420 _mode(no_mode),
421 _nonliteral(noreg, noreg, 0)
422 {}
423
424 Address(Register r) :
425 _mode(base_plus_offset),
426 _nonliteral(r, noreg, 0)
427 {}
428
429 template<typename T, ENABLE_IF(std::is_integral<T>::value)>
430 Address(Register r, T o) :
431 _mode(base_plus_offset),
432 _nonliteral(r, noreg, o)
433 {}
434
435 Address(Register r, ByteSize disp) : Address(r, in_bytes(disp)) {}
436
437 Address(Register r, Register r1, extend ext = lsl()) :
438 _mode(base_plus_offset_reg),
439 _nonliteral(r, r1, 0, ext)
440 {}
441
442 Address(Pre p) :
443 _mode(pre),
444 _nonliteral(p.reg(), noreg, p.offset())
445 {}
446
447 Address(Post p) :
448 _mode(p.is_postreg() ? post_reg : post),
449 _nonliteral(p.reg(), p.idx_reg(), p.offset())
450 {}
451
452 Address(address target, const RelocationHolder& rspec) :
453 _mode(literal),
454 _literal(target, rspec)
455 {}
456
457 Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
458
459 Address(Register base, RegisterOrConstant index, extend ext = lsl()) {
460 if (index.is_register()) {
461 _mode = base_plus_offset_reg;
462 new (&_nonliteral) Nonliteral(base, index.as_register(), 0, ext);
463 } else {
464 guarantee(ext.option() == ext::uxtx, "should be");
465 assert(index.is_constant(), "should be");
466 _mode = base_plus_offset;
467 new (&_nonliteral) Nonliteral(base,
468 noreg,
469 index.as_constant() << ext.shift());
470 }
471 }
472
473 Address(const Address& a) : _mode(a._mode) { copy_data(a); }
474
475 // Verify the value is trivially destructible regardless of mode, so our
476 // destructor can also be trivial, and so our assignment operator doesn't
477 // need to destruct the old value before copying over it.
478 static_assert(std::is_trivially_destructible<Literal>::value, "must be");
479 static_assert(std::is_trivially_destructible<Nonliteral>::value, "must be");
480
481 Address& operator=(const Address& a) {
482 _mode = a._mode;
483 copy_data(a);
484 return *this;
485 }
486
487 ~Address() = default;
488
489 Register base() const {
490 assert_is_nonliteral();
491 return _nonliteral._base;
492 }
493
494 int64_t offset() const {
495 assert_is_nonliteral();
496 return _nonliteral._offset;
497 }
498
499 Register index() const {
500 assert_is_nonliteral();
501 return _nonliteral._index;
502 }
503
504 extend ext() const {
505 assert_is_nonliteral();
506 return _nonliteral._ext;
507 }
508
509 mode getMode() const {
510 return _mode;
511 }
512
513 bool uses(Register reg) const {
514 switch (_mode) {
515 case literal:
516 case no_mode:
517 return false;
518 case base_plus_offset:
519 case base_plus_offset_reg:
520 case pre:
521 case post:
522 case post_reg:
523 return base() == reg || index() == reg;
524 default:
525 ShouldNotReachHere();
526 return false;
527 }
528 }
529
530 address target() const {
531 assert_is_literal();
532 return _literal._target;
533 }
534
535 const RelocationHolder& rspec() const {
536 assert_is_literal();
537 return _literal._rspec;
538 }
539
540 void encode(Instruction_aarch64 *i) const {
541 i->f(0b111, 29, 27);
542 i->srf(base(), 5);
543
544 switch(_mode) {
545 case base_plus_offset:
546 {
547 unsigned size = i->get(31, 30);
548 if (i->get(26, 26) && i->get(23, 23)) {
549 // SIMD Q Type - Size = 128 bits
550 assert(size == 0, "bad size");
551 size = 0b100;
552 }
553 assert(offset_ok_for_immed(offset(), size),
554 "must be, was: " INT64_FORMAT ", %d", offset(), size);
555 unsigned mask = (1 << size) - 1;
556 if (offset() < 0 || offset() & mask) {
557 i->f(0b00, 25, 24);
558 i->f(0, 21), i->f(0b00, 11, 10);
559 i->sf(offset(), 20, 12);
560 } else {
561 i->f(0b01, 25, 24);
562 i->f(checked_cast<unsigned>(offset() >> size), 21, 10);
563 }
564 }
565 break;
566
567 case base_plus_offset_reg:
568 {
569 i->f(0b00, 25, 24);
570 i->f(1, 21);
571 i->rf(index(), 16);
572 i->f(ext().option(), 15, 13);
573 unsigned size = i->get(31, 30);
574 if (i->get(26, 26) && i->get(23, 23)) {
575 // SIMD Q Type - Size = 128 bits
576 assert(size == 0, "bad size");
577 size = 0b100;
578 }
579 if (size == 0) // It's a byte
580 i->f(ext().shift() >= 0, 12);
581 else {
582 guarantee(ext().shift() <= 0 || ext().shift() == (int)size, "bad shift");
583 i->f(ext().shift() > 0, 12);
584 }
585 i->f(0b10, 11, 10);
586 }
587 break;
588
589 case pre:
590 i->f(0b00, 25, 24);
591 i->f(0, 21), i->f(0b11, 11, 10);
592 i->sf(offset(), 20, 12);
593 break;
594
595 case post:
596 i->f(0b00, 25, 24);
597 i->f(0, 21), i->f(0b01, 11, 10);
598 i->sf(offset(), 20, 12);
599 break;
600
601 default:
602 ShouldNotReachHere();
603 }
604 }
605
606 void encode_pair(Instruction_aarch64 *i) const {
607 switch(_mode) {
608 case base_plus_offset:
609 i->f(0b010, 25, 23);
610 break;
611 case pre:
612 i->f(0b011, 25, 23);
613 break;
614 case post:
615 i->f(0b001, 25, 23);
616 break;
617 default:
618 ShouldNotReachHere();
619 }
620
621 unsigned size; // Operand shift in 32-bit words
622
623 if (i->get(26, 26)) { // float
624 switch(i->get(31, 30)) {
625 case 0b10:
626 size = 2; break;
627 case 0b01:
628 size = 1; break;
629 case 0b00:
630 size = 0; break;
631 default:
632 ShouldNotReachHere();
633 size = 0; // unreachable
634 }
635 } else {
636 size = i->get(31, 31);
637 }
638
639 size = 4 << size;
640 guarantee(offset() % size == 0, "bad offset");
641 i->sf(offset() / size, 21, 15);
642 i->srf(base(), 5);
643 }
644
645 void encode_nontemporal_pair(Instruction_aarch64 *i) const {
646 guarantee(_mode == base_plus_offset, "Bad addressing mode for nontemporal op");
647 i->f(0b000, 25, 23);
648 unsigned size = i->get(31, 31);
649 size = 4 << size;
650 guarantee(offset() % size == 0, "bad offset");
651 i->sf(offset() / size, 21, 15);
652 i->srf(base(), 5);
653 }
654
655 void lea(MacroAssembler *, Register) const;
656
657 static bool offset_ok_for_immed(int64_t offset, uint shift);
658
659 static bool offset_ok_for_sve_immed(int64_t offset, int shift, int vl /* sve vector length */) {
660 if (offset % vl == 0) {
661 // Convert address offset into sve imm offset (MUL VL).
662 int64_t sve_offset = offset / vl;
663 int32_t range = 1 << (shift - 1);
664 if ((-range <= sve_offset) && (sve_offset < range)) {
665 // sve_offset can be encoded
666 return true;
667 }
668 }
669 return false;
670 }
671 };
672
673 // Convenience classes
674 class RuntimeAddress: public Address {
675
676 public:
677
678 RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}
679
680 };
681
682 class OopAddress: public Address {
683
684 public:
685
686 OopAddress(address target) : Address(target, relocInfo::oop_type){}
687
688 };
689
690 class ExternalAddress: public Address {
691 private:
692 static relocInfo::relocType reloc_for_target(address target) {
693 // Sometimes ExternalAddress is used for values which aren't
694 // exactly addresses, like the card table base.
695 // external_word_type can't be used for values in the first page
696 // so just skip the reloc in that case.
697 return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
698 }
699
700 public:
701
702 ExternalAddress(address target) : Address(target, reloc_for_target(target)) {}
703
704 };
705
706 class InternalAddress: public Address {
707
708 public:
709
710 InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
711 };
712
// Number of stack slots needed to save the full FP/SIMD register state.
const int FPUStateSizeInWords = FloatRegister::number_of_registers * FloatRegister::save_slots_per_register;
714
// PRFM prefetch-operation encodings: PLD = prefetch for load, PST =
// prefetch for store, PLI = preload instructions; L1/L2/L3 select the
// cache level, KEEP/STRM select retained vs. streaming data.
typedef enum {
  PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
  PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,
  PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM
} prfop;
720
721 class Assembler : public AbstractAssembler {
722
723 public:
724
#ifndef PRODUCT
  // Assembly breakpoint: when the current output pc equals asm_bp, a
  // host nop is executed, giving a native debugger a place to stop
  // while code at a chosen address is being generated.
  static const uintptr_t asm_bp;

  void emit_int32(jint x) {
    if ((uintptr_t)pc() == asm_bp)
      NOP();
    AbstractAssembler::emit_int32(x);
  }
#else
  // Product build: no breakpoint check, just emit the word.
  void emit_int32(jint x) {
    AbstractAssembler::emit_int32(x);
  }
#endif
738
  // All AArch64 instructions are a fixed 4 bytes.
  enum { instruction_size = 4 };

  //---< calculate length of instruction >---
  // We just use the values set above.
  // instruction must start at passed address
  static unsigned int instr_len(unsigned char *instr) { return instruction_size; }

  //---< longest instructions >---
  static unsigned int instr_maxlen() { return instruction_size; }
748
749 Address adjust(Register base, int offset, bool preIncrement) {
750 if (preIncrement)
751 return Address(Pre(base, offset));
752 else
753 return Address(Post(base, offset));
754 }
755
  // Pre-indexed address: base is updated by offset before the access.
  Address pre(Register base, int offset) {
    return adjust(base, offset, true);
  }

  // Post-indexed address: base is updated by offset after the access.
  Address post(Register base, int offset) {
    return adjust(base, offset, false);
  }

  // Register post-indexed address: base is updated by idx's value
  // after the access.
  Address post(Register base, Register idx) {
    return Address(Post(base, idx));
  }

  static address locate_next_instruction(address inst);
769
770 #define f current_insn.f
771 #define sf current_insn.sf
772 #define rf current_insn.rf
773 #define lrf current_insn.lrf
774 #define srf current_insn.srf
775 #define zrf current_insn.zrf
776 #define prf current_insn.prf
777 #define pgrf current_insn.pgrf
778
  // Member-function-pointer types for the branch emitters; wrap_label
  // uses them to emit a branch to a Label, deferring the patch until
  // the label is bound if necessary.
  typedef void (Assembler::* uncond_branch_insn)(address dest);
  typedef void (Assembler::* compare_and_branch_insn)(Register Rt, address dest);
  typedef void (Assembler::* test_and_branch_insn)(Register Rt, int bitpos, address dest);
  typedef void (Assembler::* prefetch_insn)(address target, prfop);

  void wrap_label(Label &L, uncond_branch_insn insn);
  void wrap_label(Register r, Label &L, compare_and_branch_insn insn);
  void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn);
  void wrap_label(Label &L, prfop, prefetch_insn insn);
788
  // PC-rel. addressing

  void adr(Register Rd, address dest);
  void _adrp(Register Rd, address dest);

  void adr(Register Rd, const Address &dest);
  void _adrp(Register Rd, const Address &dest);
797 void adr(Register Rd, Label &L) {
798 wrap_label(Rd, L, &Assembler::Assembler::adr);
799 }
  // ADRP to a label; patched when L is bound if not yet resolved.
  void _adrp(Register Rd, Label &L) {
    wrap_label(Rd, L, &Assembler::_adrp);
  }
803
  // Deliberately unavailable: callers must not use this overload.
  void adrp(Register Rd, const Address &dest, uint64_t &offset) = delete;

  // Prefetch memory at adr with the given prefetch operation.
  void prfm(const Address &adr, prfop pfop = PLDL1KEEP);
807
808 #undef INSN
809
  // Emit an add/sub-immediate, trying op with uimm and falling back to
  // negated_op with -uimm when that encodes better; defined elsewhere.
  void add_sub_immediate(Instruction_aarch64 &current_insn, Register Rd, Register Rn,
                         unsigned uimm, int op, int negated_op);

  // Add/subtract (immediate)
  // Flag-setting forms: Rd is reg|ZR, Rn is reg|SP. The explicit-shift
  // form takes a 12-bit immediate with shift of 0 or 12.
#define INSN(NAME, decode, negated)                                     \
  void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) {   \
    starti;                                                             \
    f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \
    zrf(Rd, 0), srf(Rn, 5);                                             \
  }                                                                     \
                                                                        \
  void NAME(Register Rd, Register Rn, unsigned imm) {                   \
    starti;                                                             \
    add_sub_immediate(current_insn, Rd, Rn, imm, decode, negated);      \
  }

  INSN(addsw, 0b001, 0b011);
  INSN(subsw, 0b011, 0b001);
  INSN(adds, 0b101, 0b111);
  INSN(subs, 0b111, 0b101);

#undef INSN

  // Non-flag-setting add/subtract (immediate).
#define INSN(NAME, decode, negated)                     \
  void NAME(Register Rd, Register Rn, unsigned imm) {   \
    starti;                                             \
    add_sub_immediate(current_insn, Rd, Rn, imm, decode, negated); \
  }

  INSN(addw, 0b000, 0b010);
  INSN(subw, 0b010, 0b000);
  INSN(add, 0b100, 0b110);
  INSN(sub, 0b110, 0b100);

#undef INSN
845
  // Logical (immediate)
  // imm is encoded as a 13-bit bitmask immediate by
  // encode_logical_immediate; Rd is reg|SP, Rn is reg|ZR.
#define INSN(NAME, decode, is32)                                \
  void NAME(Register Rd, Register Rn, uint64_t imm) {           \
    starti;                                                     \
    uint32_t val = encode_logical_immediate(is32, imm);         \
    f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10);     \
    srf(Rd, 0), zrf(Rn, 5);                                     \
  }

  INSN(andw, 0b000, true);
  INSN(orrw, 0b001, true);
  INSN(eorw, 0b010, true);
  INSN(andr, 0b100, false);
  INSN(orr, 0b101, false);
  INSN(eor, 0b110, false);

#undef INSN

  // Flag-setting logical (immediate): here Rd is reg|ZR, not reg|SP.
#define INSN(NAME, decode, is32)                                \
  void NAME(Register Rd, Register Rn, uint64_t imm) {           \
    starti;                                                     \
    uint32_t val = encode_logical_immediate(is32, imm);         \
    f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10);     \
    zrf(Rd, 0), zrf(Rn, 5);                                     \
  }

  INSN(ands, 0b111, false);
  INSN(andsw, 0b011, true);

#undef INSN
876
  // Move wide (immediate)
  // 16-bit immediate at a shift position that must be a multiple of 16.
#define INSN(NAME, opcode)                                      \
  void NAME(Register Rd, unsigned imm, unsigned shift = 0) {    \
    assert_cond((shift/16)*16 == shift);                        \
    starti;                                                     \
    f(opcode, 31, 29), f(0b100101, 28, 23), f(shift/16, 22, 21),\
      f(imm, 20, 5);                                            \
    zrf(Rd, 0);                                                 \
  }

  INSN(movnw, 0b000);
  INSN(movzw, 0b010);
  INSN(movkw, 0b011);
  INSN(movn, 0b100);
  INSN(movz, 0b110);
  INSN(movk, 0b111);

#undef INSN
895
  // Bitfield
  // size == 0 marks the 32-bit variants, whose immr/imms must be < 32.
#define INSN(NAME, opcode, size)                                        \
  void NAME(Register Rd, Register Rn, unsigned immr, unsigned imms) {   \
    starti;                                                             \
    guarantee(size == 1 || (immr < 32 && imms < 32), "incorrect immr/imms");\
    f(opcode, 31, 22), f(immr, 21, 16), f(imms, 15, 10);                \
    zrf(Rn, 5), rf(Rd, 0);                                              \
  }

  INSN(sbfmw, 0b0001001100, 0);
  INSN(bfmw,  0b0011001100, 0);
  INSN(ubfmw, 0b0101001100, 0);
  INSN(sbfm,  0b1001001101, 1);
  INSN(bfm,   0b1011001101, 1);
  INSN(ubfm,  0b1101001101, 1);

#undef INSN

  // Extract
  // Extracts a register-sized field from the Rm:Rn pair starting at
  // bit position imms; size == 0 marks the 32-bit variant.
#define INSN(NAME, opcode, size)                                        \
  void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) {     \
    starti;                                                             \
    guarantee(size == 1 || imms < 32, "incorrect imms");                \
    f(opcode, 31, 21), f(imms, 15, 10);                                 \
    zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);                                \
  }

  INSN(extrw, 0b00010011100, 0);
  INSN(extr,  0b10010011110, 1);

#undef INSN
927
  // The maximum range of a branch is fixed for the AArch64
  // architecture.  In debug mode we shrink it in order to test
  // trampolines, but not so small that branches in the interpreter
  // are out of range.
  static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);

  static bool reachable_from_branch_at(address branch, address target) {
    return g_uabs(target - branch) < branch_range;
  }

  // Unconditional branch (immediate)
  // The word-aligned offset to dest is encoded in a signed 26-bit field.
#define INSN(NAME, opcode)                                              \
  void NAME(address dest) {                                             \
    starti;                                                             \
    int64_t offset = (dest - pc()) >> 2;                                \
    DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
    f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0);               \
  }                                                                     \
  void NAME(Label &L) {                                                 \
    wrap_label(L, &Assembler::NAME);                                    \
  }                                                                     \
  void NAME(const Address &dest);

  INSN(b, 0);
  INSN(bl, 1);

#undef INSN
955
  // Compare & branch (immediate)
  // Branch to dest when Rt is (non-)zero; signed 19-bit word offset.
#define INSN(NAME, opcode)                              \
  void NAME(Register Rt, address dest) {                \
    int64_t offset = (dest - pc()) >> 2;                \
    starti;                                             \
    f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0);    \
  }                                                     \
  void NAME(Register Rt, Label &L) {                    \
    wrap_label(Rt, L, &Assembler::NAME);                \
  }

  INSN(cbzw,  0b00110100);
  INSN(cbnzw, 0b00110101);
  INSN(cbz,   0b10110100);
  INSN(cbnz,  0b10110101);

#undef INSN

  // Test & branch (immediate)
  // Branch when bit 'bitpos' of Rt is zero/nonzero; the bit number is
  // split into b5 (bit 31) and the low five bits; signed 14-bit offset.
#define INSN(NAME, opcode)                                              \
  void NAME(Register Rt, int bitpos, address dest) {                    \
    int64_t offset = (dest - pc()) >> 2;                                \
    int b5 = bitpos >> 5;                                               \
    bitpos &= 0x1f;                                                     \
    starti;                                                             \
    f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, int bitpos, Label &L) {                        \
    wrap_label(Rt, bitpos, L, &Assembler::NAME);                        \
  }

  INSN(tbz,  0b0110110);
  INSN(tbnz, 0b0110111);

#undef INSN
992
  // Conditional branch (immediate)
  // Standard AArch64 condition codes; HS/CS and LO/CC are aliases.
  enum Condition
    {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};

  // B.cond to dest; signed 19-bit word offset.
  void br(Condition cond, address dest) {
    int64_t offset = (dest - pc()) >> 2;
    starti;
    f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
  }

  // Mnemonic wrappers: beq, bne, ... for each condition.
#define INSN(NAME, cond)                        \
  void NAME(address dest) {                     \
    br(cond, dest);                             \
  }

  INSN(beq, EQ);
  INSN(bne, NE);
  INSN(bhs, HS);
  INSN(bcs, CS);
  INSN(blo, LO);
  INSN(bcc, CC);
  INSN(bmi, MI);
  INSN(bpl, PL);
  INSN(bvs, VS);
  INSN(bvc, VC);
  INSN(bhi, HI);
  INSN(bls, LS);
  INSN(bge, GE);
  INSN(blt, LT);
  INSN(bgt, GT);
  INSN(ble, LE);
  INSN(bal, AL);
  INSN(bnv, NV);

  void br(Condition cc, Label &L);

#undef INSN
1030
  // Exception generation
  // Common emitter for the SVC/HVC/SMC/BRK/HLT/DCPSn family with a
  // 16-bit immediate payload.
  void generate_exception(int opc, int op2, int LL, unsigned imm) {
    starti;
    f(0b11010100, 31, 24);
    f(opc, 23, 21), f(imm, 20, 5), f(op2, 4, 2), f(LL, 1, 0);
  }

#define INSN(NAME, opc, op2, LL)                \
  void NAME(unsigned imm) {                     \
    generate_exception(opc, op2, LL, imm);      \
  }

  INSN(svc,   0b000, 0, 0b01);
  INSN(hvc,   0b000, 0, 0b10);
  INSN(smc,   0b000, 0, 0b11);
  INSN(brk,   0b001, 0, 0b00);
  INSN(hlt,   0b010, 0, 0b00);
  INSN(dcps1, 0b101, 0, 0b01);
  INSN(dcps2, 0b101, 0, 0b10);
  INSN(dcps3, 0b101, 0, 0b11);

#undef INSN
1053
  // System
  // Common emitter for the system-instruction space, parameterized by
  // the op0/op1/CRn/CRm/op2 fields and an optional operand register.
  void system(int op0, int op1, int CRn, int CRm, int op2,
              Register rt = dummy_reg)
  {
    starti;
    f(0b11010101000, 31, 21);
    f(op0, 20, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    rf(rt, 0);
  }

  // Hint instructions

#define INSN(NAME, crm, op2)                    \
  void NAME() {                                 \
    system(0b00, 0b011, 0b0010, crm, op2);      \
  }

  INSN(nop,   0b000, 0b0000);
  INSN(yield, 0b000, 0b0001);
  INSN(wfe,   0b000, 0b0010);
  INSN(wfi,   0b000, 0b0011);
  INSN(sev,   0b000, 0b0100);
  INSN(sevl,  0b000, 0b0101);

  // Pointer-authentication hints (PAC/AUT/XPAC).
  INSN(autia1716, 0b0001, 0b100);
  INSN(autiasp,   0b0011, 0b101);
  INSN(autiaz,    0b0011, 0b100);
  INSN(autib1716, 0b0001, 0b110);
  INSN(autibsp,   0b0011, 0b111);
  INSN(autibz,    0b0011, 0b110);
  INSN(pacia1716, 0b0001, 0b000);
  INSN(paciasp,   0b0011, 0b001);
  INSN(paciaz,    0b0011, 0b000);
  INSN(pacib1716, 0b0001, 0b010);
  INSN(pacibsp,   0b0011, 0b011);
  INSN(pacibz,    0b0011, 0b010);
  INSN(xpaclri,   0b0000, 0b111);

#undef INSN
1097
1098 // we only provide mrs and msr for the special purpose system
1099 // registers where op1 (instr[20:19]) == 11
1100 // n.b msr has L (instr[21]) == 0 mrs has L == 1
1101
  // Move rt to a special-purpose system register (op0 fixed at 0b11,
  // L bit == 0, as noted above).
  void msr(int op1, int CRn, int CRm, int op2, Register rt) {
    starti;
    f(0b1101010100011, 31, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    // writing zr is ok
    zrf(rt, 0);
  }
1112
  // Move a special-purpose system register to rt (L bit == 1).
  void mrs(int op1, int CRn, int CRm, int op2, Register rt) {
    starti;
    f(0b1101010100111, 31, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    // reading to zr is a mistake
    rf(rt, 0);
  }
1123
  // Barrier domain/type encodings for the CRm field of DMB/DSB:
  // OSH* = outer shareable, NSH* = non-shareable, ISH* = inner shareable,
  // with LD (loads), ST (stores) and full variants; SY = full system.
  enum barrier {OSHLD = 0b0001, OSHST, OSH, NSHLD=0b0101, NSHST, NSH,
                ISHLD = 0b1001, ISHST, ISH, LD=0b1101, ST, SY};
1126
  // Data synchronization barrier; imm selects domain/type (see enum barrier).
  void dsb(barrier imm) {
    system(0b00, 0b011, 0b00011, imm, 0b100);
  }

  // Data memory barrier.
  void dmb(barrier imm) {
    system(0b00, 0b011, 0b00011, imm, 0b101);
  }

  // Instruction synchronization barrier (always full-system).
  void isb() {
    system(0b00, 0b011, 0b00011, SY, 0b110);
  }

  // Speculation barrier.
  void sb() {
    system(0b00, 0b011, 0b00011, 0b0000, 0b111);
  }

  // SYS instruction: system op with op0 == 0b01; rt defaults to
  // register 31 (the "no register" encoding).
  void sys(int op1, int CRn, int CRm, int op2,
           Register rt = as_Register(0b11111)) {
    system(0b01, op1, CRn, CRm, op2, rt);
  }
1147
1148 // Only implement operations accessible from EL0 or higher, i.e.,
1149 // op1 CRn CRm op2
1150 // IC IVAU 3 7 5 1
1151 // DC CVAC 3 7 10 1
1152 // DC CVAP 3 7 12 1
1153 // DC CVAU 3 7 11 1
1154 // DC CIVAC 3 7 14 1
1155 // DC ZVA 3 7 4 1
1156 // So only deal with the CRm field.
  // CRm encodings for the cache-maintenance ops listed in the table above;
  // op1/CRn/op2 are fixed (3/7/1) so only CRm varies.
  enum icache_maintenance {IVAU = 0b0101};
  enum dcache_maintenance {CVAC = 0b1010, CVAP = 0b1100, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};

  // Data-cache maintenance by VA in Rt.
  void dc(dcache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // Instruction-cache maintenance by VA in Rt.
  void ic(icache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }
1167
1168 // A more convenient access to dmb for our purposes
  // A more convenient access to dmb for our purposes
  enum Membar_mask_bits {
    // We can use ISH for a barrier because the Arm ARM says "This
    // architecture assumes that all Processing Elements that use the
    // same operating system or hypervisor are in the same Inner
    // Shareable shareability domain."
    StoreStore = ISHST,
    LoadStore = ISHLD,
    LoadLoad = ISHLD,
    // StoreLoad needs a full barrier; ISHLD/ISHST cannot express it.
    StoreLoad = ISH,
    AnyAny = ISH
  };

  // Emit a DMB implementing the given memory-ordering constraint.
  void membar(Membar_mask_bits order_constraint) {
    dmb(Assembler::barrier(order_constraint));
  }
1184
1185 // Unconditional branch (register)
1186
  // Unconditional branch (register) encoding. OP selects BR/BLR/RET/ERET/
  // DRPS; A and M are the pointer-authentication bits (use-key-A / use-key-B
  // and modifier forms). RN is the branch target, RM the PAC modifier.
  void branch_reg(int OP, int A, int M, Register RN, Register RM) {
    starti;
    f(0b1101011, 31, 25);
    f(OP, 24, 21);
    f(0b111110000, 20, 12);
    f(A, 11, 11);
    f(M, 10, 10);
    rf(RN, 5);
    rf(RM, 0);
  }
1197
// Plain (non-authenticated) register branches: target in RN, RM unused
// (r0 here only fills the always-zero Rm field).
#define INSN(NAME, opc)                         \
  void NAME(Register RN) {                      \
    branch_reg(opc, 0, 0, RN, r0);              \
  }

  INSN(br, 0b0000);
  INSN(blr, 0b0001);
  INSN(ret, 0b0010);

  void ret(void *p); // This forces a compile-time error for ret(0)

#undef INSN
1210
// Register-less branches (exception return / debug restore state); both
// register fields take their fixed encodings.
#define INSN(NAME, opc)                         \
  void NAME() {                                 \
    branch_reg(opc, 0, 0, dummy_reg, r0);       \
  }

  INSN(eret, 0b0100);
  INSN(drps, 0b0101);

#undef INSN
1220
// Authenticated returns: RETAA/RETAB (A bit set; M selects key A vs key B).
#define INSN(NAME, M)                                   \
  void NAME() {                                         \
    branch_reg(0b0010, 1, M, dummy_reg, dummy_reg);     \
  }

  INSN(retaa, 0);
  INSN(retab, 1);

#undef INSN
1230
// Authenticated branches with zero modifier (BRAAZ/BRABZ/BLRAAZ/BLRABZ):
// target in rn, modifier is implicitly zero.
#define INSN(NAME, OP, M)                       \
  void NAME(Register rn) {                      \
    branch_reg(OP, 1, M, rn, dummy_reg);        \
  }

  INSN(braaz, 0b0000, 0);
  INSN(brabz, 0b0000, 1);
  INSN(blraaz, 0b0001, 0);
  INSN(blrabz, 0b0001, 1);

#undef INSN
1242
// Authenticated branches with a register modifier (BRAA/BRAB/BLRAA/BLRAB):
// target in rn, PAC modifier in rm.
#define INSN(NAME, OP, M)                       \
  void NAME(Register rn, Register rm) {         \
    branch_reg(OP, 1, M, rn, rm);               \
  }

  INSN(braa, 0b1000, 0);
  INSN(brab, 0b1000, 1);
  INSN(blraa, 0b1001, 0);
  INSN(blrab, 0b1001, 1);

#undef INSN
1254
1255 // Load/store exclusive
  // Access size for exclusive/atomic operations; values match the
  // instruction "size" field in bits [31:30].
  enum operand_size { byte, halfword, word, xword };

  // Shared encoder for the load/store-exclusive family. Rs is the status
  // register (stores), Rt1/Rt2 the data registers (Rt2 only for pairs),
  // Rn the base address; "ordered" sets the acquire/release (o0) bit.
  void load_store_exclusive(Register Rs, Register Rt1, Register Rt2,
    Register Rn, enum operand_size sz, int op, bool ordered) {
    starti;
    f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
    rf(Rs, 16), f(ordered, 15), zrf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0);
  }
1264
  // Load-exclusive of the given size into dst; ordered selects the
  // acquire variant (ldaxr* vs ldxr*).
  void load_exclusive(Register dst, Register addr,
                      enum operand_size sz, bool ordered) {
    load_store_exclusive(dummy_reg, dst, dummy_reg, addr,
                         sz, 0b010, ordered);
  }

  // Store-exclusive of new_val to addr; status receives 0 on success.
  // ordered selects the release variant (stlxr* vs stxr*).
  void store_exclusive(Register status, Register new_val, Register addr,
                       enum operand_size sz, bool ordered) {
    load_store_exclusive(status, new_val, dummy_reg, addr,
                         sz, 0b000, ordered);
  }
1276
// Named forms of the exclusive/ordered load-store family. The guarantees
// reject register combinations the architecture marks UNPREDICTABLE
// (status register aliasing a data or base register).
#define INSN4(NAME, sz, op, o0) /* Four registers */                    \
  void NAME(Register Rs, Register Rt1, Register Rt2, Register Rn) {     \
    guarantee(Rs != Rn && Rs != Rt1 && Rs != Rt2, "unpredictable instruction"); \
    load_store_exclusive(Rs, Rt1, Rt2, Rn, sz, op, o0);                 \
  }

#define INSN3(NAME, sz, op, o0) /* Three registers */                   \
  void NAME(Register Rs, Register Rt, Register Rn) {                    \
    guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction");       \
    load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0);            \
  }

#define INSN2(NAME, sz, op, o0) /* Two registers */                     \
  void NAME(Register Rt, Register Rn) {                                 \
    load_store_exclusive(dummy_reg, Rt, dummy_reg,                      \
                         Rn, sz, op, o0);                               \
  }

#define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \
  void NAME(Register Rt1, Register Rt2, Register Rn) {                  \
    guarantee(Rt1 != Rt2, "unpredictable instruction");                 \
    load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0);          \
  }

  // bytes
  INSN3(stxrb, byte, 0b000, 0);
  INSN3(stlxrb, byte, 0b000, 1);
  INSN2(ldxrb, byte, 0b010, 0);
  INSN2(ldaxrb, byte, 0b010, 1);
  INSN2(stlrb, byte, 0b100, 1);
  INSN2(ldarb, byte, 0b110, 1);

  // halfwords
  INSN3(stxrh, halfword, 0b000, 0);
  INSN3(stlxrh, halfword, 0b000, 1);
  INSN2(ldxrh, halfword, 0b010, 0);
  INSN2(ldaxrh, halfword, 0b010, 1);
  INSN2(stlrh, halfword, 0b100, 1);
  INSN2(ldarh, halfword, 0b110, 1);

  // words
  INSN3(stxrw, word, 0b000, 0);
  INSN3(stlxrw, word, 0b000, 1);
  INSN4(stxpw, word, 0b001, 0);
  INSN4(stlxpw, word, 0b001, 1);
  INSN2(ldxrw, word, 0b010, 0);
  INSN2(ldaxrw, word, 0b010, 1);
  INSN2(stlrw, word, 0b100, 1);
  INSN2(ldarw, word, 0b110, 1);
  // pairs of words
  INSN_FOO(ldxpw, word, 0b011, 0);
  INSN_FOO(ldaxpw, word, 0b011, 1);

  // xwords
  INSN3(stxr, xword, 0b000, 0);
  INSN3(stlxr, xword, 0b000, 1);
  INSN4(stxp, xword, 0b001, 0);
  INSN4(stlxp, xword, 0b001, 1);
  INSN2(ldxr, xword, 0b010, 0);
  INSN2(ldaxr, xword, 0b010, 1);
  INSN2(stlr, xword, 0b100, 1);
  INSN2(ldar, xword, 0b110, 1);
  // pairs of xwords
  INSN_FOO(ldxp, xword, 0b011, 0);
  INSN_FOO(ldaxp, xword, 0b011, 1);

#undef INSN2
#undef INSN3
#undef INSN4
#undef INSN_FOO
1347
1348 // 8.1 Compare and swap extensions
  // Shared encoder for LSE CAS/CASP. Rs holds the compare (and receives
  // the old) value, Rt the new value, Rn the address; a/r are the
  // acquire/release bits, not_pair distinguishes CAS from CASP.
  void lse_cas(Register Rs, Register Rt, Register Rn,
               enum operand_size sz, bool a, bool r, bool not_pair) {
    starti;
    if (! not_pair) { // Pair
      assert(sz == word || sz == xword, "invalid size");
      /* The size bit is in bit 30, not 31 */
      sz = (operand_size)(sz == word ? 0b00:0b01);
    }
    f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
    zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
  }
1360
1361 // CAS
// Compare-and-swap variants; suffixes a/l/al select acquire/release
// semantics. The assert rejects UNPREDICTABLE register aliasing.
#define INSN(NAME, a, r)                                                \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    assert(Rs != Rn && Rs != Rt, "unpredictable instruction");          \
    lse_cas(Rs, Rt, Rn, sz, a, r, true);                                \
  }
  INSN(cas, false, false)
  INSN(casa, true, false)
  INSN(casl, false, true)
  INSN(casal, true, true)
#undef INSN
1372
1373 // CASP
// Compare-and-swap pair: requires even-numbered Rs/Rt whose successors
// are Rs1/Rt1 (only Rs/Rt are encoded; the pair is implicit).
#define INSN(NAME, a, r)                                                \
  void NAME(operand_size sz, Register Rs, Register Rs1,                 \
            Register Rt, Register Rt1, Register Rn) {                   \
    assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 &&    \
           Rs->successor() == Rs1 && Rt->successor() == Rt1 &&          \
           Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers");     \
    lse_cas(Rs, Rt, Rn, sz, a, r, false);                               \
  }
  INSN(casp, false, false)
  INSN(caspa, true, false)
  INSN(caspl, false, true)
  INSN(caspal, true, true)
#undef INSN
1387
1388 // 8.1 Atomic operations
  // Shared encoder for the LSE atomic read-modify-write family
  // (LDADD/LDEOR/.../SWP). Rs is the operand, Rt receives the old value,
  // Rn is the address; op1/op2 select the operation, a/r the ordering.
  void lse_atomic(Register Rs, Register Rt, Register Rn,
                  enum operand_size sz, int op1, int op2, bool a, bool r) {
    starti;
    f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
    zrf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
  }
1395
// Each atomic operation gets four methods: plain, acquire (_A),
// release (_L) and acquire+release (_AL).
#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2)                   \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false);                 \
  }                                                                     \
  void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false);                  \
  }                                                                     \
  void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true);                  \
  }                                                                     \
  void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true);                   \
  }
  INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000);
  INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001);
  INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010);
  INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011);
  INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
  INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
  INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
  INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
  INSN(swp, swpa, swpl, swpal, 1, 0b000);
#undef INSN
1419
1420 // Load register (literal)
// Load register (literal): PC-relative loads. The offset is in words
// (hence >> 2) and must fit the signed 19-bit field at [23:5].
// Three overloads per name: raw address, address + reloc, and Label
// (resolved later via wrap_label).
#define INSN(NAME, opc, V)                                              \
  void NAME(Register Rt, address dest) {                                \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype) {    \
    InstructionMark im(this);                                           \
    guarantee(rtype == relocInfo::internal_word_type,                   \
              "only internal_word_type relocs make sense here");        \
    code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
    NAME(Rt, dest);                                                     \
  }                                                                     \
  void NAME(Register Rt, Label &L) {                                    \
    wrap_label(Rt, L, &Assembler::NAME);                                \
  }

  INSN(ldrw, 0b00, 0);
  INSN(ldr, 0b01, 0);
  INSN(ldrsw, 0b10, 0);

#undef INSN
1445
// PC-relative literal loads into SIMD&FP registers (V == 1).
#define INSN(NAME, opc, V)                                              \
  void NAME(FloatRegister Rt, address dest) {                           \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf(as_Register(Rt), 0);                                             \
  }

  INSN(ldrs, 0b00, 1);
  INSN(ldrd, 0b01, 1);
  INSN(ldrq, 0b10, 1);

#undef INSN
1460
// SIMD&FP register loads with a plain base register and zero offset
// (unsigned-offset form, imm == 0).
#define INSN(NAME, size, opc)                                           \
  void NAME(FloatRegister Rt, Register Rn) {                            \
    starti;                                                             \
    f(size, 31, 30), f(0b111100, 29, 24), f(opc, 23, 22), f(0, 21);     \
    f(0, 20, 12), f(0b01, 11, 10);                                      \
    rf(Rn, 5), rf(as_Register(Rt), 0);                                  \
  }

  INSN(ldrs, 0b10, 0b01);
  INSN(ldrd, 0b11, 0b01);
  INSN(ldrq, 0b00, 0b11);

#undef INSN
1474
1475
// PC-relative prefetch; the prfop (type/target/policy) goes in the Rt
// slot. The Label overload defers the offset via wrap_label.
#define INSN(NAME, opc, V)                                              \
  void NAME(address dest, prfop op = PLDL1KEEP) {                       \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    f(op, 4, 0);                                                        \
  }                                                                     \
  void NAME(Label &L, prfop op = PLDL1KEEP) {                           \
    wrap_label(L, op, &Assembler::NAME);                                \
  }

  INSN(prfm, 0b11, 0);

#undef INSN
1491
1492 // Load/store
  // Shared encoder for load/store-pair instructions. The address mode
  // (offset/pre/post-index) is encoded by the Address object itself;
  // no_allocate selects the non-temporal (STNP/LDNP) encoding.
  void ld_st1(int opc, int p1, int V, int L,
              Register Rt1, Register Rt2, Address adr, bool no_allocate) {
    starti;
    f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
    zrf(Rt2, 10), zrf(Rt1, 0);
    if (no_allocate) {
      adr.encode_nontemporal_pair(&current_insn);
    } else {
      adr.encode_pair(&current_insn);
    }
  }
1504
1505 // Load/store register pair (offset)
// General-purpose register pair load/store (and no-allocate variants).
#define INSN(NAME, size, p1, V, L, no_allocate)                 \
  void NAME(Register Rt1, Register Rt2, Address adr) {          \
    ld_st1(size, p1, V, L, Rt1, Rt2, adr, no_allocate);         \
  }

  INSN(stpw, 0b00, 0b101, 0, 0, false);
  INSN(ldpw, 0b00, 0b101, 0, 1, false);
  INSN(ldpsw, 0b01, 0b101, 0, 1, false);
  INSN(stp, 0b10, 0b101, 0, 0, false);
  INSN(ldp, 0b10, 0b101, 0, 1, false);

  // Load/store no-allocate pair (offset)
  INSN(stnpw, 0b00, 0b101, 0, 0, true);
  INSN(ldnpw, 0b00, 0b101, 0, 1, true);
  INSN(stnp, 0b10, 0b101, 0, 0, true);
  INSN(ldnp, 0b10, 0b101, 0, 1, true);

#undef INSN
1524
// SIMD&FP register pair load/store (V == 1).
#define INSN(NAME, size, p1, V, L, no_allocate)                 \
  void NAME(FloatRegister Rt1, FloatRegister Rt2, Address adr) { \
    ld_st1(size, p1, V, L,                                      \
           as_Register(Rt1), as_Register(Rt2), adr, no_allocate); \
  }

  INSN(stps, 0b00, 0b101, 1, 0, false);
  INSN(ldps, 0b00, 0b101, 1, 1, false);
  INSN(stpd, 0b01, 0b101, 1, 0, false);
  INSN(ldpd, 0b01, 0b101, 1, 1, false);
  INSN(stpq, 0b10, 0b101, 1, 0, false);
  INSN(ldpq, 0b10, 0b101, 1, 1, false);

#undef INSN
1539
1540 // Load/store register (all modes)
  // Load/store register (all modes)
  // Shared encoder for single-register loads/stores. size/op select the
  // width and load/store/sign-extend form; V == 1 means SIMD&FP register.
  void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
    starti;

    f(V, 26); // general reg?
    zrf(Rt, 0);

    // Encoding for literal loads is done here (rather than pushed
    // down into Address::encode) because the encoding of this
    // instruction is too different from all of the other forms to
    // make it worth sharing.
    if (adr.getMode() == Address::literal) {
      assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
      assert(op == 0b01, "literal form can only be used with loads");
      f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
      int64_t offset = (adr.target() - pc()) >> 2;
      sf(offset, 23, 5);
      code_section()->relocate(pc(), adr.rspec());
      return;
    }

    f(size, 31, 30);
    f(op, 23, 22); // str
    adr.encode(&current_insn);
  }
1565
// General-purpose single-register loads/stores in every addressing mode.
// op == 0b00 store, 0b01 load, 0b10/0b11 sign-extending loads (to 64/32 bits).
#define INSN(NAME, size, op)                            \
  void NAME(Register Rt, const Address &adr) {          \
    ld_st2(Rt, adr, size, op);                          \
  }                                                     \

  INSN(str, 0b11, 0b00);
  INSN(strw, 0b10, 0b00);
  INSN(strb, 0b00, 0b00);
  INSN(strh, 0b01, 0b00);

  INSN(ldr, 0b11, 0b01);
  INSN(ldrw, 0b10, 0b01);
  INSN(ldrb, 0b00, 0b01);
  INSN(ldrh, 0b01, 0b01);

  INSN(ldrsb, 0b00, 0b10);
  INSN(ldrsbw, 0b00, 0b11);
  INSN(ldrsh, 0b01, 0b10);
  INSN(ldrshw, 0b01, 0b11);
  INSN(ldrsw, 0b10, 0b10);

#undef INSN
1588
1589 #define INSN(NAME, size, op) \
1590 void NAME(FloatRegister Rt, const Address &adr) { \
1591 ld_st2(as_Register(Rt), adr, size, op, 1); \
1592 }
1593
1594 INSN(strd, 0b11, 0b00);
1595 INSN(strs, 0b10, 0b00);
1596 INSN(ldrd, 0b11, 0b01);
1597 INSN(ldrs, 0b10, 0b01);
1598 INSN(strq, 0b00, 0b10);
1599 INSN(ldrq, 0x00, 0b11);
1600
1601 #undef INSN
1602
1603 // Load/store a register, but with a BasicType parameter. Loaded signed integer values are
1604 // extended to 64 bits.
  // Load a value of BasicType bt from adr into Rt; signed sub-word types
  // and T_INT use the sign-extending load form (op 0b10) so the result
  // is extended to 64 bits, everything else uses a plain load.
  void load(Register Rt, const Address &adr, BasicType bt) {
    int op = (is_signed_subword_type(bt) || bt == T_INT) ? 0b10 : 0b01;
    ld_st2(Rt, adr, exact_log2(type2aelembytes(bt)), op);
  }
  // Store Rt to adr with the element width implied by bt.
  void store(Register Rt, const Address &adr, BasicType bt) {
    ld_st2(Rt, adr, exact_log2(type2aelembytes(bt)), 0b00);
  }
1612
1613 /* SIMD extensions
1614 *
1615 * We just use FloatRegister in the following. They are exactly the same
1616 * as SIMD registers.
1617 */
1618 public:
1619
  // Vector arrangement: element type and count (e.g. T8B = 8 x byte).
  enum SIMD_Arrangement {
    T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D, T1Q, INVALID_ARRANGEMENT
  };

  // Scalar register variant: byte/half/single/double/quad view.
  enum SIMD_RegVariant {
    B, H, S, D, Q, INVALID
  };
1627
 private:

  // Lookup tables (defined in the .cpp file): element-size-in-bytes to
  // arrangement (indexed by [esize][isQ]) and to register variant.
  static SIMD_Arrangement _esize2arrangement_table[9][2];
  static SIMD_RegVariant _esize2regvariant[9];

 public:

  static SIMD_Arrangement esize2arrangement(unsigned esize, bool isQ);
  static SIMD_RegVariant elemType_to_regVariant(BasicType bt);
  static SIMD_RegVariant elemBytes_to_regVariant(unsigned esize);
  // Return the corresponding bits for different SIMD_RegVariant value.
  static unsigned regVariant_to_elemBits(SIMD_RegVariant T);
1640
  // Shift kinds for shifted-register operands; values match the
  // instruction "shift" field encoding.
  enum shift_kind { LSL, LSR, ASR, ROR };

  // Fill the common fields of a shifted-register data-processing
  // instruction; callers set the register fields and the N bit.
  void op_shifted_reg(Instruction_aarch64 &current_insn, unsigned decode,
                      enum shift_kind kind, unsigned shift,
                      unsigned size, unsigned op) {
    f(size, 31);
    f(op, 30, 29);
    f(decode, 28, 24);
    f(shift, 15, 10);
    f(kind, 23, 22);
  }
1652
1653 // Logical (shifted register)
// Logical (shifted register): AND/ORR/EOR/ANDS and their 32-bit (w)
// forms. The guarantee enforces shift < 32 for the W variants, since
// the shift amount field cannot validly exceed the register width.
#define INSN(NAME, size, op, N)                                 \
  void NAME(Register Rd, Register Rn, Register Rm,              \
            enum shift_kind kind = LSL, unsigned shift = 0) {   \
    starti;                                                     \
    guarantee(size == 1 || shift < 32, "incorrect shift");      \
    f(N, 21);                                                   \
    zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);                        \
    op_shifted_reg(current_insn, 0b01010, kind, shift, size, op); \
  }

  INSN(andr, 1, 0b00, 0);
  INSN(orr, 1, 0b01, 0);
  INSN(eor, 1, 0b10, 0);
  INSN(ands, 1, 0b11, 0);
  INSN(andw, 0, 0b00, 0);
  INSN(orrw, 0, 0b01, 0);
  INSN(eorw, 0, 0b10, 0);
  INSN(andsw, 0, 0b11, 0);

#undef INSN
1674
1675 #define INSN(NAME, size, op, N) \
1676 void NAME(Register Rd, Register Rn, Register Rm, \
1677 enum shift_kind kind = LSL, unsigned shift = 0) { \
1678 starti; \
1679 f(N, 21); \
1680 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
1681 op_shifted_reg(current_insn, 0b01010, kind, shift, size, op); \
1682 } \
1683 \
1684 /* These instructions have no immediate form. Provide an overload so \
1685 that if anyone does try to use an immediate operand -- this has \
1686 happened! -- we'll get a compile-time error. */ \
1687 void NAME(Register Rd, Register Rn, unsigned imm, \
1688 enum shift_kind kind = LSL, unsigned shift = 0) { \
1689 assert(false, " can't be used with immediate operand"); \
1690 }
1691
1692 INSN(bic, 1, 0b00, 1);
1693 INSN(orn, 1, 0b01, 1);
1694 INSN(eon, 1, 0b10, 1);
1695 INSN(bics, 1, 0b11, 1);
1696 INSN(bicw, 0, 0b00, 1);
1697 INSN(ornw, 0, 0b01, 1);
1698 INSN(eonw, 0, 0b10, 1);
1699 INSN(bicsw, 0, 0b11, 1);
1700
1701 #undef INSN
1702
#ifdef _WIN64
// In MSVC, `mvn` is defined as a macro and it affects compilation
#undef mvn
#endif

  // Aliases for short forms of orn
  // mvn Rd, Rm  ==  orn Rd, zr, Rm  (bitwise NOT of the shifted operand).
  void mvn(Register Rd, Register Rm,
           enum shift_kind kind = LSL, unsigned shift = 0) {
    orn(Rd, zr, Rm, kind, shift);
  }

  // 32-bit form of mvn.
  void mvnw(Register Rd, Register Rm,
            enum shift_kind kind = LSL, unsigned shift = 0) {
    ornw(Rd, zr, Rm, kind, shift);
  }
1718
1719 // Add/subtract (shifted register)
// Add/subtract (shifted register). ROR is architecturally invalid for
// add/sub shifted operands, hence the assert; W forms require shift < 32.
#define INSN(NAME, size, op)                                    \
  void NAME(Register Rd, Register Rn, Register Rm,              \
            enum shift_kind kind, unsigned shift = 0) {         \
    starti;                                                     \
    f(0, 21);                                                   \
    assert_cond(kind != ROR);                                   \
    guarantee(size == 1 || shift < 32, "incorrect shift");      \
    zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16);                        \
    op_shifted_reg(current_insn, 0b01011, kind, shift, size, op); \
  }

  INSN(add, 1, 0b000);
  INSN(sub, 1, 0b10);
  INSN(addw, 0, 0b000);
  INSN(subw, 0, 0b10);

  // Flag-setting variants.
  INSN(adds, 1, 0b001);
  INSN(subs, 1, 0b11);
  INSN(addsw, 0, 0b001);
  INSN(subsw, 0, 0b11);

#undef INSN
1742
1743 // Add/subtract (extended register)
// Add/subtract (extended register): Rm is extended per `option` and
// shifted left by `amount` (0..4). Rd/Rn use the stack-pointer form
// (srf) because sp is a valid operand in this encoding class.
#define INSN(NAME, op)                                          \
  void NAME(Register Rd, Register Rn, Register Rm,              \
            ext::operation option, int amount = 0) {            \
    starti;                                                     \
    zrf(Rm, 16), srf(Rn, 5), srf(Rd, 0);                        \
    add_sub_extended_reg(current_insn, op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \
  }

  // Fill the non-register fields of an extended-register add/sub.
  void add_sub_extended_reg(Instruction_aarch64 &current_insn, unsigned op, unsigned decode,
                            Register Rd, Register Rn, Register Rm,
                            unsigned opt, ext::operation option, unsigned imm) {
    guarantee(imm <= 4, "shift amount must be <= 4");
    f(op, 31, 29), f(decode, 28, 24), f(opt, 23, 22), f(1, 21);
    f(option, 15, 13), f(imm, 12, 10);
  }

  INSN(addw, 0b000);
  INSN(subw, 0b010);
  INSN(add, 0b100);
  INSN(sub, 0b110);

#undef INSN
1766
// Flag-setting extended-register add/sub. Unlike the non-flag-setting
// group, Rd uses zrf: the destination may be zr but never sp.
#define INSN(NAME, op)                                          \
  void NAME(Register Rd, Register Rn, Register Rm,              \
            ext::operation option, int amount = 0) {            \
    starti;                                                     \
    zrf(Rm, 16), srf(Rn, 5), zrf(Rd, 0);                        \
    add_sub_extended_reg(current_insn, op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \
  }

  INSN(addsw, 0b001);
  INSN(subsw, 0b011);
  INSN(adds, 0b101);
  INSN(subs, 0b111);

#undef INSN
1781
1782 // Aliases for short forms of add and sub
// Aliases for short forms of add and sub
// Three-register convenience forms: when sp is involved the extended
// encoding (which accepts sp) must be used; otherwise use the shifted
// encoding with LSL #0.
#define INSN(NAME)                                      \
  void NAME(Register Rd, Register Rn, Register Rm) {    \
    if (Rd == sp || Rn == sp)                           \
      NAME(Rd, Rn, Rm, ext::uxtx);                      \
    else                                                \
      NAME(Rd, Rn, Rm, LSL);                            \
  }

  INSN(addw);
  INSN(subw);
  INSN(add);
  INSN(sub);

  INSN(addsw);
  INSN(subsw);
  INSN(adds);
  INSN(subs);

#undef INSN
1802
1803 // Add/subtract (with carry)
  // Add/subtract (with carry)
  // op packs sf (64-bit), op (sub) and S (set-flags) into bits [31:29].
  void add_sub_carry(unsigned op, Register Rd, Register Rn, Register Rm) {
    starti;
    f(op, 31, 29);
    f(0b11010000, 28, 21);
    f(0b000000, 15, 10);
    zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);
  }

#define INSN(NAME, op)                                  \
  void NAME(Register Rd, Register Rn, Register Rm) {    \
    add_sub_carry(op, Rd, Rn, Rm);                      \
  }

  INSN(adcw, 0b000);
  INSN(adcsw, 0b001);
  INSN(sbcw, 0b010);
  INSN(sbcsw, 0b011);
  INSN(adc, 0b100);
  INSN(adcs, 0b101);
  INSN(sbc, 0b110);
  INSN(sbcs, 0b111);

#undef INSN
1827
1828 // Conditional compare (both kinds)
  // Conditional compare (both kinds)
  // o1 == 0 selects the register form (imm5 holds Rm's number),
  // o1 == 1 the immediate form (imm5 is a 5-bit immediate); nzcv is the
  // flag value used when `cond` is false.
  void conditional_compare(unsigned op, int o1, int o2, int o3,
                           Register Rn, unsigned imm5, unsigned nzcv,
                           unsigned cond) {
    starti;
    f(op, 31, 29);
    f(0b11010010, 28, 21);
    f(cond, 15, 12);
    f(o1, 11);
    f(o2, 10);
    f(o3, 4);
    f(nzcv, 3, 0);
    f(imm5, 20, 16), zrf(Rn, 5);
  }

// Two overloads per name: compare against a register, or against a
// 5-bit immediate.
#define INSN(NAME, op)                                                  \
  void NAME(Register Rn, Register Rm, int imm, Condition cond) {        \
    int regNumber = (Rm == zr ? 31 : Rm->encoding());                   \
    conditional_compare(op, 0, 0, 0, Rn, regNumber, imm, cond);         \
  }                                                                     \
                                                                        \
  void NAME(Register Rn, int imm5, int imm, Condition cond) {           \
    conditional_compare(op, 1, 0, 0, Rn, imm5, imm, cond);              \
  }

  INSN(ccmnw, 0b001);
  INSN(ccmpw, 0b011);
  INSN(ccmn, 0b101);
  INSN(ccmp, 0b111);

#undef INSN
1859
1860 // Conditional select
  // Conditional select
  // Rd = cond ? Rn : op2(Rm), where op/op2 choose plain select,
  // increment, invert or negate of the false operand.
  void conditional_select(unsigned op, unsigned op2,
                          Register Rd, Register Rn, Register Rm,
                          unsigned cond) {
    starti;
    f(op, 31, 29);
    f(0b11010100, 28, 21);
    f(cond, 15, 12);
    f(op2, 11, 10);
    zrf(Rm, 16), zrf(Rn, 5), rf(Rd, 0);
  }

#define INSN(NAME, op, op2)                                             \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond) {    \
    conditional_select(op, op2, Rd, Rn, Rm, cond);                      \
  }

  INSN(cselw, 0b000, 0b00);
  INSN(csincw, 0b000, 0b01);
  INSN(csinvw, 0b010, 0b00);
  INSN(csnegw, 0b010, 0b01);
  INSN(csel, 0b100, 0b00);
  INSN(csinc, 0b100, 0b01);
  INSN(csinv, 0b110, 0b00);
  INSN(csneg, 0b110, 0b01);

#undef INSN
1887
1888 // Data processing
  // Data processing
  // Common fields for 1- and 2-source data-processing instructions;
  // callers fill bits [20:16] (opcode2 or Rm) before calling.
  void data_processing(Instruction_aarch64 &current_insn, unsigned op29, unsigned opcode,
                       Register Rd, Register Rn) {
    f(op29, 31, 29), f(0b11010110, 28, 21);
    f(opcode, 15, 10);
    rf(Rn, 5), rf(Rd, 0);
  }
1895
1896 // (1 source)
// (1 source)
// Bit/byte reversal, count-leading ops, and PAC sign/authenticate forms
// that take Rn as the modifier.
#define INSN(NAME, op29, opcode2, opcode)               \
  void NAME(Register Rd, Register Rn) {                 \
    starti;                                             \
    f(opcode2, 20, 16);                                 \
    data_processing(current_insn, op29, opcode, Rd, Rn); \
  }

  INSN(rbitw, 0b010, 0b00000, 0b00000);
  INSN(rev16w, 0b010, 0b00000, 0b00001);
  INSN(revw, 0b010, 0b00000, 0b00010);
  INSN(clzw, 0b010, 0b00000, 0b00100);
  INSN(clsw, 0b010, 0b00000, 0b00101);

  INSN(rbit, 0b110, 0b00000, 0b00000);
  INSN(rev16, 0b110, 0b00000, 0b00001);
  INSN(rev32, 0b110, 0b00000, 0b00010);
  INSN(rev, 0b110, 0b00000, 0b00011);
  INSN(clz, 0b110, 0b00000, 0b00100);
  INSN(cls, 0b110, 0b00000, 0b00101);

  // PAC instructions
  INSN(pacia, 0b110, 0b00001, 0b00000);
  INSN(pacib, 0b110, 0b00001, 0b00001);
  INSN(pacda, 0b110, 0b00001, 0b00010);
  INSN(pacdb, 0b110, 0b00001, 0b00011);
  INSN(autia, 0b110, 0b00001, 0b00100);
  INSN(autib, 0b110, 0b00001, 0b00101);
  INSN(autda, 0b110, 0b00001, 0b00110);
  INSN(autdb, 0b110, 0b00001, 0b00111);

#undef INSN
1928
// PAC forms with an implicit zero modifier (and XPAC strip forms):
// single-operand, so the Rn slot takes the fixed dummy encoding.
#define INSN(NAME, op29, opcode2, opcode)               \
  void NAME(Register Rd) {                              \
    starti;                                             \
    f(opcode2, 20, 16);                                 \
    data_processing(current_insn, op29, opcode, Rd, dummy_reg); \
  }

  // PAC instructions (with zero modifier)
  INSN(paciza, 0b110, 0b00001, 0b01000);
  INSN(pacizb, 0b110, 0b00001, 0b01001);
  INSN(pacdza, 0b110, 0b00001, 0b01010);
  INSN(pacdzb, 0b110, 0b00001, 0b01011);
  INSN(autiza, 0b110, 0b00001, 0b01100);
  INSN(autizb, 0b110, 0b00001, 0b01101);
  INSN(autdza, 0b110, 0b00001, 0b01110);
  INSN(autdzb, 0b110, 0b00001, 0b01111);
  INSN(xpaci, 0b110, 0b00001, 0b10000);
  INSN(xpacd, 0b110, 0b00001, 0b10001);

#undef INSN
1949
1950 // Data-processing (2 source)
// Data-processing (2 source): divides and variable shifts; Rm goes in
// bits [20:16].
#define INSN(NAME, op29, opcode)                        \
  void NAME(Register Rd, Register Rn, Register Rm) {    \
    starti;                                             \
    rf(Rm, 16);                                         \
    data_processing(current_insn, op29, opcode, Rd, Rn); \
  }

  INSN(udivw, 0b000, 0b000010);
  INSN(sdivw, 0b000, 0b000011);
  INSN(lslvw, 0b000, 0b001000);
  INSN(lsrvw, 0b000, 0b001001);
  INSN(asrvw, 0b000, 0b001010);
  INSN(rorvw, 0b000, 0b001011);

  INSN(udiv, 0b100, 0b000010);
  INSN(sdiv, 0b100, 0b000011);
  INSN(lslv, 0b100, 0b001000);
  INSN(lsrv, 0b100, 0b001001);
  INSN(asrv, 0b100, 0b001010);
  INSN(rorv, 0b100, 0b001011);

#undef INSN
1973
1974 // Data-processing (3 source)
  // Data-processing (3 source)
  // Multiply-accumulate family: Rd = Ra +/- Rn * Rm (o0 selects subtract).
  void data_processing(unsigned op54, unsigned op31, unsigned o0,
                       Register Rd, Register Rn, Register Rm,
                       Register Ra) {
    starti;
    f(op54, 31, 29), f(0b11011, 28, 24);
    f(op31, 23, 21), f(o0, 15);
    zrf(Rm, 16), zrf(Ra, 10), zrf(Rn, 5), zrf(Rd, 0);
  }

#define INSN(NAME, op54, op31, o0)                                      \
  void NAME(Register Rd, Register Rn, Register Rm, Register Ra) {       \
    data_processing(op54, op31, o0, Rd, Rn, Rm, Ra);                    \
  }

  INSN(maddw, 0b000, 0b000, 0);
  INSN(msubw, 0b000, 0b000, 1);
  INSN(madd, 0b100, 0b000, 0);
  INSN(msub, 0b100, 0b000, 1);
  INSN(smaddl, 0b100, 0b001, 0);
  INSN(smsubl, 0b100, 0b001, 1);
  INSN(umaddl, 0b100, 0b101, 0);
  INSN(umsubl, 0b100, 0b101, 1);

#undef INSN
1999
// High-half multiplies: three-operand forms where the Ra field takes
// the fixed all-ones encoding (register 31).
#define INSN(NAME, op54, op31, o0)                                      \
  void NAME(Register Rd, Register Rn, Register Rm) {                    \
    data_processing(op54, op31, o0, Rd, Rn, Rm, as_Register(31));       \
  }

  INSN(smulh, 0b100, 0b010, 0);
  INSN(umulh, 0b100, 0b110, 0);

#undef INSN
2009
2010 // Floating-point data-processing (1 source)
  // Floating-point data-processing (1 source)
  // type selects the precision (00 single, 01 double, 11 half);
  // opcode selects move/abs/neg/sqrt/convert.
  void data_processing(unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(0b000, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 20, 15), f(0b10000, 14, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, type, opcode)                        \
  void NAME(FloatRegister Vd, FloatRegister Vn) {       \
    data_processing(type, opcode, Vd, Vn);              \
  }

  INSN(fmovs, 0b00, 0b000000);
  INSN(fabss, 0b00, 0b000001);
  INSN(fnegs, 0b00, 0b000010);
  INSN(fsqrts, 0b00, 0b000011);
  INSN(fcvts, 0b00, 0b000101);   // Single-precision to double-precision
  INSN(fcvths, 0b11, 0b000100);  // Half-precision to single-precision
  INSN(fcvtsh, 0b00, 0b000111);  // Single-precision to half-precision

  INSN(fmovd, 0b01, 0b000000);
  INSN(fabsd, 0b01, 0b000001);
  INSN(fnegd, 0b01, 0b000010);
  INSN(fsqrtd, 0b01, 0b000011);
  INSN(fcvtd, 0b01, 0b000100);   // Double-precision to single-precision

  INSN(fsqrth, 0b11, 0b000011);  // Half-precision sqrt
2040
private:
  // Shared encoder for FCVTL (extend: e.g. 4H -> 4S) and FCVTN (narrow).
  // The assert checks the two arrangements differ by exactly one
  // element-size step in the right direction.
  void _fcvt_narrow_extend(FloatRegister Vd, SIMD_Arrangement Ta,
                           FloatRegister Vn, SIMD_Arrangement Tb, bool do_extend) {
    assert((do_extend && (Tb >> 1) + 1 == (Ta >> 1))
           || (!do_extend && (Ta >> 1) + 1 == (Tb >> 1)), "Incompatible arrangement");
    starti;
    int op30 = (do_extend ? Tb : Ta) & 1;          // Q bit comes from the narrow side
    int op22 = ((do_extend ? Ta : Tb) >> 1) & 1;   // size bit from the wide side
    f(0, 31), f(op30, 30), f(0b0011100, 29, 23), f(op22, 22);
    f(0b100001011, 21, 13), f(do_extend ? 1 : 0, 12), f(0b10, 11, 10);
    rf(Vn, 5), rf(Vd, 0);
  }
2053
public:
  // Floating-point convert to higher precision long (vector); Tb is the
  // (narrower) source arrangement.
  void fcvtl(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    assert(Tb == T4H || Tb == T8H|| Tb == T2S || Tb == T4S, "invalid arrangement");
    _fcvt_narrow_extend(Vd, Ta, Vn, Tb, true);
  }

  // Floating-point convert to lower precision narrow (vector); Ta is the
  // (narrower) destination arrangement.
  void fcvtn(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    assert(Ta == T4H || Ta == T8H|| Ta == T2S || Ta == T4S, "invalid arrangement");
    _fcvt_narrow_extend(Vd, Ta, Vn, Tb, false);
  }

#undef INSN
2066
2067 // Floating-point data-processing (2 source)
  // Floating-point data-processing (2 source)
  // type selects precision (00 single, 01 double, 11 half); opcode
  // selects mul/div/add/sub/max/min/nmul.
  void data_processing(unsigned op31, unsigned type, unsigned opcode, unsigned op21,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(op21, 21), f(opcode, 15, 10);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode, op21)                            \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {     \
    data_processing(op31, type, opcode, op21, Vd, Vn, Vm);              \
  }

  INSN(fmuls, 0b000, 0b00, 0b000010, 0b1);
  INSN(fdivs, 0b000, 0b00, 0b000110, 0b1);
  INSN(fadds, 0b000, 0b00, 0b001010, 0b1);
  INSN(fsubs, 0b000, 0b00, 0b001110, 0b1);
  INSN(fmaxs, 0b000, 0b00, 0b010010, 0b1);
  INSN(fmins, 0b000, 0b00, 0b010110, 0b1);
  INSN(fnmuls, 0b000, 0b00, 0b100010, 0b1);

  INSN(fmuld, 0b000, 0b01, 0b000010, 0b1);
  INSN(fdivd, 0b000, 0b01, 0b000110, 0b1);
  INSN(faddd, 0b000, 0b01, 0b001010, 0b1);
  INSN(fsubd, 0b000, 0b01, 0b001110, 0b1);
  INSN(fmaxd, 0b000, 0b01, 0b010010, 0b1);
  INSN(fmind, 0b000, 0b01, 0b010110, 0b1);
  INSN(fnmuld, 0b000, 0b01, 0b100010, 0b1);

  // Half-precision floating-point instructions
  INSN(fmulh, 0b000, 0b11, 0b000010, 0b1);
  INSN(fdivh, 0b000, 0b11, 0b000110, 0b1);
  INSN(faddh, 0b000, 0b11, 0b001010, 0b1);
  INSN(fsubh, 0b000, 0b11, 0b001110, 0b1);
  INSN(fmaxh, 0b000, 0b11, 0b010010, 0b1);
  INSN(fminh, 0b000, 0b11, 0b010110, 0b1);
  INSN(fnmulh, 0b000, 0b11, 0b100010, 0b1);
#undef INSN
2107
2108 // Advanced SIMD scalar three same
// Advanced SIMD scalar three same
#define INSN(NAME, U, size, opcode)                                     \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {     \
    starti;                                                             \
    f(0b01, 31, 30), f(U, 29), f(0b11110, 28, 24), f(size, 23, 22), f(1, 21); \
    rf(Vm, 16), f(opcode, 15, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);      \
  }

  INSN(fabds, 0b1, 0b10, 0b11010); // Floating-point Absolute Difference (single-precision)
  INSN(fabdd, 0b1, 0b11, 0b11010); // Floating-point Absolute Difference (double-precision)

#undef INSN
2120
2121 // Advanced SIMD scalar three same FP16
// Advanced SIMD scalar three same FP16
#define INSN(NAME, U, a, opcode)                                        \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {     \
    starti;                                                             \
    f(0b01, 31, 30), f(U, 29), f(0b11110, 28, 24), f(a, 23), f(0b10, 22, 21); \
    rf(Vm, 16), f(0b00, 15, 14), f(opcode, 13, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(fabdh, 0b1, 0b1, 0b010); // Floating-point Absolute Difference (half-precision float)

#undef INSN
2132
  // Floating-point data-processing (3 source)
  // op31 -> bits 31:29, type -> 23:22 (00 single, 01 double, 11 half),
  // o1 -> bit 21, o0 -> bit 15. Va (bits 14:10) is the addend register.
  void data_processing(unsigned op31, unsigned type, unsigned o1, unsigned o0,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,
                       FloatRegister Va) {
    starti;
    f(op31, 31, 29);
    f(0b11111, 28, 24);
    f(type, 23, 22), f(o1, 21), f(o0, 15);
    rf(Vm, 16), rf(Va, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Fused multiply-add family: NAME Vd, Vn, Vm, Va.
#define INSN(NAME, op31, type, o1, o0)                                  \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,       \
            FloatRegister Va) {                                         \
    data_processing(op31, type, o1, o0, Vd, Vn, Vm, Va);                \
  }

  INSN(fmadds, 0b000, 0b00, 0, 0);
  INSN(fmsubs, 0b000, 0b00, 0, 1);
  INSN(fnmadds, 0b000, 0b00, 1, 0);
  INSN(fnmsubs, 0b000, 0b00, 1, 1);

  INSN(fmaddd, 0b000, 0b01, 0, 0);
  INSN(fmsubd, 0b000, 0b01, 0, 1);
  INSN(fnmaddd, 0b000, 0b01, 1, 0);
  // NOTE(review): name lacks the usual 'd' suffix (fnmsubd would match the
  // pattern) but is kept as-is for compatibility with existing callers.
  INSN(fnmsub, 0b000, 0b01, 1, 1);

  INSN(fmaddh, 0b000, 0b11, 0, 0); // half-precision fused multiply-add (scalar)
#undef INSN
2162
  // Floating-point conditional select
  // FCSEL: Vd = cond ? Vn : Vm. type: 00 = single, 01 = double.
  void fp_conditional_select(unsigned op31, unsigned type,
                             unsigned op1, unsigned op2,
                             Condition cond, FloatRegister Vd,
                             FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22);
    f(op1, 21, 21);
    f(op2, 11, 10);
    f(cond, 15, 12); // condition occupies bits 15:12
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, op1, op2)                        \
  void NAME(FloatRegister Vd, FloatRegister Vn,                 \
            FloatRegister Vm, Condition cond) {                 \
    fp_conditional_select(op31, type, op1, op2, cond, Vd, Vn, Vm); \
  }

  INSN(fcsels, 0b000, 0b00, 0b1, 0b11);
  INSN(fcseld, 0b000, 0b01, 0b1, 0b11);

#undef INSN
2188
  // Conversion between floating-point and integer
  // sflag = sf (bit 31): 1 selects a 64-bit general register.
  // ftype -> bits 23:22, rmode -> 20:19, opcode -> 18:16.
  // Note: both operands are passed as Register; the INSN macros below
  // reinterpret one side as a FloatRegister via as_Register().
  void float_int_convert(unsigned sflag, unsigned ftype,
                         unsigned rmode, unsigned opcode,
                         Register Rd, Register Rn) {
    starti;
    f(sflag, 31);
    f(0b00, 30, 29);
    f(0b11110, 28, 24);
    f(ftype, 23, 22), f(1, 21), f(rmode, 20, 19);
    f(opcode, 18, 16), f(0b000000, 15, 10);
    zrf(Rn, 5), zrf(Rd, 0);
  }

  // FP -> general register conversions (and fmov to general).
#define INSN(NAME, sflag, ftype, rmode, opcode)                  \
  void NAME(Register Rd, FloatRegister Vn) {                     \
    float_int_convert(sflag, ftype, rmode, opcode, Rd, as_Register(Vn)); \
  }

  // RoundTowardsZero
  INSN(fcvtzsw, 0b0, 0b00, 0b11, 0b000);
  INSN(fcvtzs,  0b1, 0b00, 0b11, 0b000);
  INSN(fcvtzdw, 0b0, 0b01, 0b11, 0b000);
  INSN(fcvtzd,  0b1, 0b01, 0b11, 0b000);

  // RoundToNearestTiesAway
  INSN(fcvtassw, 0b0, 0b00, 0b00, 0b100); // float -> signed word
  INSN(fcvtasd, 0b1, 0b01, 0b00, 0b100);  // double -> signed xword

  // RoundTowardsNegative
  INSN(fcvtmssw, 0b0, 0b00, 0b10, 0b000); // float -> signed word
  INSN(fcvtmsd, 0b1, 0b01, 0b10, 0b000);  // double -> signed xword

  INSN(fmovs, 0b0, 0b00, 0b00, 0b110);
  INSN(fmovd, 0b1, 0b01, 0b00, 0b110);

  // ftype 0b10 + rmode 0b01: moves the top 64 bits of the 128-bit Vn
  INSN(fmovhid, 0b1, 0b10, 0b01, 0b110);

#undef INSN

  // General register -> FP conversions (and fmov from general).
#define INSN(NAME, sflag, type, rmode, opcode)                   \
  void NAME(FloatRegister Vd, Register Rn) {                     \
    float_int_convert(sflag, type, rmode, opcode, as_Register(Vd), Rn); \
  }

  INSN(fmovs, 0b0, 0b00, 0b00, 0b111);
  INSN(fmovd, 0b1, 0b01, 0b00, 0b111);

  INSN(scvtfws, 0b0, 0b00, 0b00, 0b010);
  INSN(scvtfs,  0b1, 0b00, 0b00, 0b010);
  INSN(scvtfwd, 0b0, 0b01, 0b00, 0b010);
  INSN(scvtfd,  0b1, 0b01, 0b00, 0b010);

  // INSN(fmovhid, 0b100, 0b10, 0b01, 0b111);

#undef INSN
2243
2244 private:
  // Vector integer -> floating-point convert (SCVTF/UCVTF, vector form).
  // Only T2S/T4S/T2D are valid; bit 30 = Q, bit 29 = U (unsigned),
  // bit 22 = sz (element size).
  void _xcvtf_vector_integer(bool is_unsigned, SIMD_Arrangement T,
                             FloatRegister Rd, FloatRegister Rn) {
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");
    starti;
    f(0, 31), f(T & 1, 30), f(is_unsigned ? 1 : 0, 29);
    f(0b011100, 28, 23), f((T >> 1) & 1, 22), f(0b100001110110, 21, 10);
    rf(Rn, 5), rf(Rd, 0);
  }
2253
2254 public:
2255
2256 void scvtfv(SIMD_Arrangement T, FloatRegister Rd, FloatRegister Rn) {
2257 _xcvtf_vector_integer(/* is_unsigned */ false, T, Rd, Rn);
2258 }
2259
  // Floating-point compare
  // FCMP/FCMP-with-zero. Vm defaults to v0 for the compare-with-zero
  // forms, where op2 selects the zero-comparison encoding instead.
  void float_compare(unsigned op31, unsigned type,
                     unsigned op, unsigned op2,
                     FloatRegister Vn, FloatRegister Vm = as_FloatRegister(0)) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21);
    f(op, 15, 14), f(0b1000, 13, 10), f(op2, 4, 0);
    rf(Vn, 5), rf(Vm, 16);
  }


  // Register-register compare.
#define INSN(NAME, op31, type, op, op2)                 \
  void NAME(FloatRegister Vn, FloatRegister Vm) {       \
    float_compare(op31, type, op, op2, Vn, Vm);         \
  }

  // Compare against literal 0.0 (only 0.0 is encodable).
#define INSN1(NAME, op31, type, op, op2)                \
  void NAME(FloatRegister Vn, double d) {               \
    assert_cond(d == 0.0);                              \
    float_compare(op31, type, op, op2, Vn);             \
  }

  INSN(fcmps, 0b000, 0b00, 0b00, 0b00000);
  INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000);
  // INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000);
  // INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000);

  INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000);
  INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000);
  // INSN(fcmped, 0b000, 0b01, 0b00, 0b10000);
  // INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000);

#undef INSN
#undef INSN1
2296
  // Floating-point compare. 3-registers versions (scalar).
  // FACGE/FACGT: absolute compare; sz = precision, e = 0 for GE, 1 for GT.
#define INSN(NAME, sz, e)                                                  \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {        \
    starti;                                                                \
    f(0b01111110, 31, 24), f(e, 23), f(sz, 22), f(1, 21), rf(Vm, 16);      \
    f(0b111011, 15, 10), rf(Vn, 5), rf(Vd, 0);                             \
  }                                                                        \

  INSN(facged, 1, 0); // facge-double
  INSN(facges, 0, 0); // facge-single
  INSN(facgtd, 1, 1); // facgt-double
  INSN(facgts, 0, 1); // facgt-single

#undef INSN
2311
2312 // Floating-point Move (immediate)
2313 private:
  // Encode a double into the 8-bit imm8 field of FMOV (scalar, immediate).
  unsigned pack(double value);

  // FMOV (scalar, immediate). size: 00 = single, 01 = double.
  // Callers must ensure `value` is representable as imm8 (see pack()).
  void fmov_imm(FloatRegister Vn, double value, unsigned size) {
    starti;
    f(0b00011110, 31, 24), f(size, 23, 22), f(1, 21);
    f(pack(value), 20, 13), f(0b10000000, 12, 5);
    rf(Vn, 0);
  }
2322
2323 public:
2324
2325 void fmovs(FloatRegister Vn, double value) {
2326 if (value)
2327 fmov_imm(Vn, value, 0b00);
2328 else
2329 movi(Vn, T2S, 0);
2330 }
2331 void fmovd(FloatRegister Vn, double value) {
2332 if (value)
2333 fmov_imm(Vn, value, 0b01);
2334 else
2335 movi(Vn, T1D, 0);
2336 }
2337
2338 // Floating-point data-processing (1 source)
2339
2340 // Floating-point rounding
2341 // type: half-precision = 11
2342 // single = 00
2343 // double = 01
2344 // rmode: A = Away = 100
2345 // I = current = 111
2346 // M = MinusInf = 010
2347 // N = eveN = 000
2348 // P = PlusInf = 001
2349 // X = eXact = 110
2350 // Z = Zero = 011
  // FRINT* scalar rounding; see the type/rmode tables in the comment above.
  void float_round(unsigned type, unsigned rmode, FloatRegister Rd, FloatRegister Rn) {
    starti;
    f(0b00011110, 31, 24);
    f(type, 23, 22);
    f(0b1001, 21, 18);
    f(rmode, 17, 15);
    f(0b10000, 14, 10);
    rf(Rn, 5), rf(Rd, 0);
  }
  // frint<mode><precision suffix: h/s/d>
#define INSN(NAME, type, rmode)                   \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    float_round(type, rmode, Vd, Vn);             \
  }

public:
  // Half-precision
  INSN(frintah, 0b11, 0b100);
  INSN(frintih, 0b11, 0b111);
  INSN(frintmh, 0b11, 0b010);
  INSN(frintnh, 0b11, 0b000);
  INSN(frintph, 0b11, 0b001);
  INSN(frintxh, 0b11, 0b110);
  INSN(frintzh, 0b11, 0b011);

  // Single-precision
  INSN(frintas, 0b00, 0b100);
  INSN(frintis, 0b00, 0b111);
  INSN(frintms, 0b00, 0b010);
  INSN(frintns, 0b00, 0b000);
  INSN(frintps, 0b00, 0b001);
  INSN(frintxs, 0b00, 0b110);
  INSN(frintzs, 0b00, 0b011);

  // Double-precision
  INSN(frintad, 0b01, 0b100);
  INSN(frintid, 0b01, 0b111);
  INSN(frintmd, 0b01, 0b010);
  INSN(frintnd, 0b01, 0b000);
  INSN(frintpd, 0b01, 0b001);
  INSN(frintxd, 0b01, 0b110);
  INSN(frintzd, 0b01, 0b011);
#undef INSN
2390
2391 private:
2392 static short SIMD_Size_in_bytes[];
2393
2394 public:
  // SIMD&FP register load/store. The size is taken from the register
  // variant T (Q needs the extra opc bit folded into the opcode).
#define INSN(NAME, op)                                                  \
  void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) {  \
    ld_st2(as_Register(Rt), adr, (int)T & 3, op + ((T==Q) ? 0b10:0b00), 1); \
  }

  INSN(ldr, 1);
  INSN(str, 0);

#undef INSN
2404
2405 private:
2406
  // Multi-structure ld/st, no writeback (base register only).
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Multi-structure ld/st, immediate post-index writeback. `imm` must
  // equal the total transfer size; Rm == 0b11111 selects the immediate
  // post-index form.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             int imm, int op1, int op2, int regs) {

    bool replicate = op2 >> 2 == 3;
    // post-index value (imm) is formed differently for replicate/non-replicate ld* instructions
    int expectedImmediate = replicate ? regs * (1 << (T >> 1)) : SIMD_Size_in_bytes[T] * regs;
    guarantee(T < T1Q , "incorrect arrangement");
    guarantee(imm == expectedImmediate, "bad offset");
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Multi-structure ld/st, register post-index writeback (Xn += Xm).
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             Register Xm, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
2433
2434 void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2, int regs) {
2435 switch (a.getMode()) {
2436 case Address::base_plus_offset:
2437 guarantee(a.offset() == 0, "no offset allowed here");
2438 ld_st(Vt, T, a.base(), op1, op2);
2439 break;
2440 case Address::post:
2441 ld_st(Vt, T, a.base(), checked_cast<int>(a.offset()), op1, op2, regs);
2442 break;
2443 case Address::post_reg:
2444 ld_st(Vt, T, a.base(), a.index(), op1, op2);
2445 break;
2446 default:
2447 ShouldNotReachHere();
2448 }
2449 }
2450
2451 // Single-structure load/store method (all addressing variants)
2452 void ld_st(FloatRegister Vt, SIMD_RegVariant T, int index, Address a,
2453 int op1, int op2, int regs) {
2454 int expectedImmediate = (regVariant_to_elemBits(T) >> 3) * regs;
2455 int sVal = (T < D) ? (index >> (2 - T)) & 0x01 : 0;
2456 int opcode = (T < D) ? (T << 2) : ((T & 0x02) << 2);
2457 int size = (T < D) ? (index & (0x3 << T)) : 1; // only care about low 2b
2458 Register Xn = a.base();
2459 int Rm;
2460
2461 switch (a.getMode()) {
2462 case Address::base_plus_offset:
2463 guarantee(a.offset() == 0, "no offset allowed here");
2464 Rm = 0;
2465 break;
2466 case Address::post:
2467 guarantee(a.offset() == expectedImmediate, "bad offset");
2468 op1 |= 0b100;
2469 Rm = 0b11111;
2470 break;
2471 case Address::post_reg:
2472 op1 |= 0b100;
2473 Rm = a.index()->encoding();
2474 break;
2475 default:
2476 ShouldNotReachHere();
2477 Rm = 0; // unreachable
2478 }
2479
2480 starti;
2481 f(0,31), f((index >> (3 - T)), 30);
2482 f(op1, 29, 21), f(Rm, 20, 16), f(op2 | opcode | sVal, 15, 12);
2483 f(size, 11, 10), srf(Xn, 5), rf(Vt, 0);
2484 }
2485
2486 public:
2487
  // Multi-structure ld/st entry points. INSN<n> takes n transfer
  // registers, which must be consecutively numbered.
#define INSN1(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, SIMD_Arrangement T, const Address &a) {   \
    ld_st(Vt, T, a, op1, op2, 1);                                       \
  }

#define INSN2(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2, "Registers must be ordered");        \
    ld_st(Vt, T, a, op1, op2, 2);                                       \
  }

#define INSN3(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            SIMD_Arrangement T, const Address &a) {                     \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3,           \
           "Registers must be ordered");                                \
    ld_st(Vt, T, a, op1, op2, 3);                                       \
  }

#define INSN4(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            FloatRegister Vt4, SIMD_Arrangement T, const Address &a) {  \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3 &&         \
           Vt3->successor() == Vt4, "Registers must be ordered");       \
    ld_st(Vt, T, a, op1, op2, 4);                                       \
  }

  INSN1(ld1,  0b001100010, 0b0111);
  INSN2(ld1,  0b001100010, 0b1010);
  INSN3(ld1,  0b001100010, 0b0110);
  INSN4(ld1,  0b001100010, 0b0010);

  INSN2(ld2,  0b001100010, 0b1000);
  INSN3(ld3,  0b001100010, 0b0100);
  INSN4(ld4,  0b001100010, 0b0000);

  INSN1(st1,  0b001100000, 0b0111);
  INSN2(st1,  0b001100000, 0b1010);
  INSN3(st1,  0b001100000, 0b0110);
  INSN4(st1,  0b001100000, 0b0010);

  INSN2(st2,  0b001100000, 0b1000);
  INSN3(st3,  0b001100000, 0b0100);
  INSN4(st4,  0b001100000, 0b0000);

  // Load single element and replicate to all lanes.
  INSN1(ld1r, 0b001101010, 0b1100);
  INSN2(ld2r, 0b001101011, 0b1100);
  INSN3(ld3r, 0b001101010, 0b1110);
  INSN4(ld4r, 0b001101011, 0b1110);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4
2542
2543 // Handle common single-structure ld/st parameter sanity checks
2544 // for all variations (1 to 4) of SIMD reigster inputs. This
2545 // method will call the routine that generates the opcode.
  // Handle common single-structure ld/st parameter sanity checks
  // for all variations (1 to 4) of SIMD register inputs. This
  // method will call the routine that generates the opcode.
  // Validates the element index range per variant (B: 0..15, H: 0..7,
  // S: 0..3, D: 0..1) and that the registers are consecutive.
  template<typename R, typename... Rx>
  void ldst_sstr(SIMD_RegVariant T, int index, const Address &a,
                 int op1, int op2, R firstReg, Rx... otherRegs) {
    const FloatRegister vtSet[] = { firstReg, otherRegs... };
    const int regCount = sizeof...(otherRegs) + 1;
    assert(index >= 0 && (T <= D) && ((T == B && index <= 15) ||
           (T == H && index <= 7) || (T == S && index <= 3) ||
           (T == D && index <= 1)), "invalid index");
    assert(regCount >= 1 && regCount <= 4, "illegal register count");

    // Check to make sure when multiple SIMD registers are used
    // that they are in successive order.
    for (int i = 0; i < regCount - 1; i++) {
      assert(vtSet[i]->successor() == vtSet[i + 1],
             "Registers must be ordered");
    }

    ld_st(firstReg, T, index, a, op1, op2, regCount);
  }
2565
  // Define a set of INSN1/2/3/4 macros to handle single-structure
  // load/store instructions (one element per register, at `index`).
#define INSN1(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, SIMD_RegVariant T, int index,             \
            const Address &a) {                                         \
    ldst_sstr(T, index, a, op1, op2, Vt);                               \
  }

#define INSN2(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_RegVariant T,     \
            int index, const Address &a) {                              \
    ldst_sstr(T, index, a, op1, op2, Vt, Vt2);                          \
  }

#define INSN3(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            SIMD_RegVariant T, int index, const Address &a) {           \
    ldst_sstr(T, index, a, op1, op2, Vt, Vt2, Vt3);                     \
  }

#define INSN4(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            FloatRegister Vt4, SIMD_RegVariant T, int index,            \
            const Address &a) {                                         \
    ldst_sstr(T, index, a, op1, op2, Vt, Vt2, Vt3, Vt4);                \
  }

  INSN1(ld1, 0b001101010, 0b0000);
  INSN2(ld2, 0b001101011, 0b0000);
  INSN3(ld3, 0b001101010, 0b0010);
  INSN4(ld4, 0b001101011, 0b0010);

  INSN1(st1, 0b001101000, 0b0000);
  INSN2(st2, 0b001101001, 0b0000);
  INSN3(st3, 0b001101000, 0b0010);
  INSN4(st4, 0b001101001, 0b0010);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4
2607
  // AdvSIMD vector bitwise logical (three registers, same type).
  // Byte arrangements only; bit 30 selects 64-bit (T8B) vs 128-bit (T16B).
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T8B || T == T16B, "must be T8B or T16B");                               \
    f(0, 31), f((int)T & 1, 30), f(opc, 29, 21);                                        \
    rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0);                              \
  }

  INSN(eor,  0b101110001);
  INSN(orr,  0b001110101);
  INSN(andr, 0b001110001); // 'andr' avoids clashing with the C++ keyword
  INSN(bic,  0b001110011);
  INSN(bif,  0b101110111);
  INSN(bit,  0b101110101);
  INSN(bsl,  0b101110011);
  INSN(orn,  0b001110111);

#undef INSN
2626
  // Advanced SIMD three different
  // Integer three-register operations. `acceptT2D` widens the accepted
  // arrangement set to include T2D; sqdmulh additionally rejects the
  // byte arrangements.
#define INSN(NAME, opc, opc2, acceptT2D)                                                \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");                           \
    if (!acceptT2D) guarantee(T != T2D, "incorrect arrangement");                       \
    if (opc2 == 0b101101) guarantee(T != T8B && T != T16B, "incorrect arrangement");    \
    starti;                                                                             \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);                        \
    f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(opc2, 15, 10);                      \
    rf(Vn, 5), rf(Vd, 0);                                                               \
  }

  INSN(addv,   0, 0b100001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(subv,   1, 0b100001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(sqaddv, 0, 0b000011, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(sqsubv, 0, 0b001011, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(uqaddv, 1, 0b000011, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(uqsubv, 1, 0b001011, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(mulv,   0, 0b100111, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlav,   0, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlsv,   1, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sshl,   0, 0b010001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(ushl,   1, 0b010001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(addpv,  0, 0b101111, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(smullv, 0, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umullv, 1, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(smlalv, 0, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umlalv, 1, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(maxv,   0, 0b011001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(minv,   0, 0b011011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umaxv,  1, 0b011001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(uminv,  1, 0b011011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(smaxp,  0, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sminp,  0, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umaxp,  1, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(uminp,  1, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sqdmulh,0, 0b101101, false); // accepted arrangements: T4H, T8H, T2S, T4S
  INSN(shsubv, 0, 0b001001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S

#undef INSN
2667
  // Advanced SIMD across lanes
  // (also covers some two-register misc forms, e.g. absr/negr/notr/cnt).
  // `accepted` narrows the allowed arrangements: 0 = bytes only,
  // 1 = no T2S/T2D, 2 = no T2D, 3 = all except T1Q/T1D.
#define INSN(NAME, opc, opc2, accepted)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {                   \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");                           \
    if (accepted < 3) guarantee(T != T2D, "incorrect arrangement");                     \
    if (accepted < 2) guarantee(T != T2S, "incorrect arrangement");                     \
    if (accepted < 1) guarantee(T == T8B || T == T16B, "incorrect arrangement");        \
    starti;                                                                             \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);                        \
    f((int)T >> 1, 23, 22), f(opc2, 21, 10);                                            \
    rf(Vn, 5), rf(Vd, 0);                                                               \
  }

  INSN(absr,   0, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(negr,   1, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(notr,   1, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(addv,   0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(smaxv,  0, 0b110000101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(umaxv,  1, 0b110000101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(sminv,  0, 0b110001101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(uminv,  1, 0b110001101010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(cls,    0, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(clz,    1, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(cnt,    0, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(uaddlp, 1, 0b100000001010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S

#undef INSN
2696
  // Floating-point max/min across lanes (T4S only).
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {   \
    starti;                                                             \
    assert(T == T4S, "arrangement must be T4S");                        \
    f(0, 31), f((int)T & 1, 30), f(0b101110, 29, 24), f(opc, 23),       \
    f(T == T4S ? 0 : 1, 22), f(0b110000111110, 21, 10); rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(fmaxv, 0);
  INSN(fminv, 1);

#undef INSN
2709
  // Advanced SIMD modified immediate
  // imm8 is the 8-bit immediate; lsl is an optional byte-granular left
  // shift folded into cmode. Non-H/S arrangements are only valid for
  // MOVI (cmode 1110, with op = 1 for the 64-bit element forms).
#define INSN(NAME, op0, cmode0)                                                           \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) {      \
    unsigned cmode = cmode0;                                                              \
    unsigned op = op0;                                                                    \
    starti;                                                                               \
    assert(lsl == 0 ||                                                                    \
           ((T == T4H || T == T8H) && lsl == 8) ||                                        \
           ((T == T2S || T == T4S) && ((lsl >> 3) < 4) && ((lsl & 7) == 0)), "invalid shift");\
    cmode |= lsl >> 2;                                                                    \
    if (T == T4H || T == T8H) cmode |= 0b1000;                                            \
    if (!(T == T4H || T == T8H || T == T2S || T == T4S)) {                                \
      assert(op == 0 && cmode0 == 0, "must be MOVI");                                     \
      cmode = 0b1110;                                                                     \
      if (T == T1D || T == T2D) op = 1;                                                   \
    }                                                                                     \
    f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19);                      \
    f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5);     \
    rf(Vd, 0);                                                                            \
  }

  INSN(movi, 0, 0);
  INSN(orri, 0, 1);
  INSN(mvni, 1, 0);
  INSN(bici, 1, 1);

#undef INSN
2737
  // FMOV (vector, immediate): broadcasts an imm8-encodable FP constant
  // to every lane; pack() produces the imm8 encoding.
#define INSN(NAME, op, cmode)                                                             \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, double imm) {                           \
    unsigned imm8 = pack(imm);                                                            \
    starti;                                                                               \
    f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19);                      \
    f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5);     \
    rf(Vd, 0);                                                                            \
  }

  INSN(fmovs, 0, 0b1111);
  INSN(fmovd, 1, 0b1111);

#undef INSN
2751
  // Advanced SIMD three same
  // FP vector three-register ops; encoder body is defined out of line.
  void adv_simd_three_same(Instruction_aarch64 &current_insn, FloatRegister Vd,
                           SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm,
                           int op1, int op2, int op3);
#define INSN(NAME, op1, op2, op3)                                                       \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    adv_simd_three_same(current_insn, Vd, T, Vn, Vm, op1, op2, op3);                    \
  }
  INSN(fabd,  1, 1, 0b0101);
  INSN(fadd,  0, 0, 0b0101);
  INSN(fdiv,  1, 0, 0b1111);
  INSN(faddp, 1, 0, 0b0101);
  INSN(fmul,  1, 0, 0b0111);
  INSN(fsub,  0, 1, 0b0101);
  INSN(fmla,  0, 0, 0b0011);
  INSN(fmls,  0, 1, 0b0011);
  INSN(fmax,  0, 0, 0b1101);
  INSN(fmin,  0, 1, 0b1101);
  INSN(facgt, 1, 1, 0b1011);

#undef INSN
2774
2775 // AdvSIMD vector compare
2776 void cm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) {
2777 starti;
2778 assert(T != T1Q && T != T1D, "incorrect arrangement");
2779 int cond_op;
2780 switch (cond) {
2781 case EQ: cond_op = 0b110001; break;
2782 case GT: cond_op = 0b000110; break;
2783 case GE: cond_op = 0b000111; break;
2784 case HI: cond_op = 0b100110; break;
2785 case HS: cond_op = 0b100111; break;
2786 default:
2787 ShouldNotReachHere();
2788 break;
2789 }
2790
2791 f(0, 31), f((int)T & 1, 30), f((cond_op >> 5) & 1, 29);
2792 f(0b01110, 28, 24), f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16);
2793 f(cond_op & 0b11111, 15, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
2794 }
2795
2796 // AdvSIMD Floating-point vector compare
2797 void fcm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) {
2798 starti;
2799 assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");
2800 int cond_op;
2801 switch (cond) {
2802 case EQ: cond_op = 0b00; break;
2803 case GT: cond_op = 0b11; break;
2804 case GE: cond_op = 0b10; break;
2805 default:
2806 ShouldNotReachHere();
2807 break;
2808 }
2809
2810 f(0, 31), f((int)T & 1, 30), f((cond_op >> 1) & 1, 29);
2811 f(0b01110, 28, 24), f(cond_op & 1, 23), f(T == T2D ? 1 : 0, 22);
2812 f(1, 21), rf(Vm, 16), f(0b111001, 15, 10), rf(Vn, 5), rf(Vd, 0);
2813 }
2814
  // SHA-1/SHA-256 three-register crypto instructions (T4S only).
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T4S, "arrangement must be T4S");                                        \
    f(0b01011110000, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(sha1c,     0b000000);
  INSN(sha1m,     0b001000);
  INSN(sha1p,     0b000100);
  INSN(sha1su0,   0b001100);
  INSN(sha256h2,  0b010100);
  INSN(sha256h,   0b010000);
  INSN(sha256su1, 0b011000);

#undef INSN
2831
  // SHA-1/SHA-256 two-register crypto instructions (T4S only).
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {   \
    starti;                                                             \
    assert(T == T4S, "arrangement must be T4S");                        \
    f(0b0101111000101000, 31, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha1h,     0b000010);
  INSN(sha1su1,   0b000110);
  INSN(sha256su0, 0b001010);

#undef INSN
2844
  // SHA-512 three-register crypto instructions (T2D only).
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T2D, "arrangement must be T2D");                                        \
    f(0b11001110011, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0);         \
  }

  INSN(sha512h,   0b100000);
  INSN(sha512h2,  0b100001);
  INSN(sha512su1, 0b100010);

#undef INSN
2857
  // SHA-512 two-register crypto instruction (T2D only).
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {   \
    starti;                                                             \
    assert(T == T2D, "arrangement must be T2D");                        \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0);                               \
  }

  INSN(sha512su0, 0b1100111011000000100000);

#undef INSN
2868
  // SHA-3 four-register instructions EOR3/BCAX (T16B only).
#define INSN(NAME, opc)                                                                                     \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, FloatRegister Va) {   \
    starti;                                                                                                 \
    assert(T == T16B, "arrangement must be T16B");                                                          \
    f(0b11001110, 31, 24), f(opc, 23, 21), rf(Vm, 16), f(0b0, 15, 15), rf(Va, 10), rf(Vn, 5), rf(Vd, 0);    \
  }

  INSN(eor3, 0b000);
  INSN(bcax, 0b001);

#undef INSN
2880
  // SHA-3 XAR: exclusive-or and rotate right by `imm` (T2D only).
#define INSN(NAME, opc)                                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, unsigned imm) {   \
    starti;                                                                                             \
    assert(T == T2D, "arrangement must be T2D");                                                        \
    f(0b11001110, 31, 24), f(opc, 23, 21), rf(Vm, 16), f(imm, 15, 10), rf(Vn, 5), rf(Vd, 0);            \
  }

  INSN(xar, 0b100);

#undef INSN
2891
  // SHA-3 RAX1: rotate left by one and exclusive-or (T2D only).
#define INSN(NAME, opc)                                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                                             \
    assert(T == T2D, "arrangement must be T2D");                                        \
    f(0b11001110, 31, 24), f(opc, 23, 21), rf(Vm, 16), f(0b100011, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(rax1, 0b011);

#undef INSN
2902
  // AES crypto instructions (full 22-bit fixed opcode, two registers).
#define INSN(NAME, opc)                                 \
  void NAME(FloatRegister Vd, FloatRegister Vn) {       \
    starti;                                             \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0);               \
  }

  INSN(aese,   0b0100111000101000010010);
  INSN(aesd,   0b0100111000101000010110);
  INSN(aesmc,  0b0100111000101000011010);
  INSN(aesimc, 0b0100111000101000011110);

#undef INSN
2915
  // FP vector-by-element multiply family: Vm[index] applied to each
  // lane of Vn. Index bits are split across sz:L:H per arrangement.
#define INSN(NAME, op1, op2)                                                                            \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index = 0) {  \
    starti;                                                                                             \
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");                                    \
    assert(index >= 0 && ((T == T2D && index <= 1) || (T != T2D && index <= 3)), "invalid index");      \
    f(0, 31), f((int)T & 1, 30), f(op1, 29); f(0b011111, 28, 23);                                       \
    f(T == T2D ? 1 : 0, 22), f(T == T2D ? 0 : index & 1, 21), rf(Vm, 16);                               \
    f(op2, 15, 12), f(T == T2D ? index : (index >> 1), 11), f(0, 10);                                   \
    rf(Vn, 5), rf(Vd, 0);                                                                               \
  }

  // FMLA/FMLS - Vector - Scalar
  INSN(fmlavs, 0, 0b0001);
  INSN(fmlsvs, 0, 0b0101);
  // FMULX - Vector - Scalar
  INSN(fmulxvs, 1, 0b1001);

#undef INSN
2934
  // Integer multiply by element: each lane of Vn times Vm[index].
  // H-element forms can only address the lower 16 vector registers (lrf).
#define INSN(NAME, op1, op2)                                                                            \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index) {      \
    starti;                                                                                             \
    assert(T == T4H || T == T8H || T == T2S || T == T4S, "invalid arrangement");                        \
    assert(index >= 0 &&                                                                                \
           ((T == T2S && index <= 1) || (T != T2S && index <= 3) || (T == T8H && index <= 7)),          \
           "invalid index");                                                                            \
    assert((T != T4H && T != T8H) || Vm->encoding() < 16, "invalid source SIMD&FP register");           \
    f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01111, 28, 24);                                        \
    if (T == T4H || T == T8H) {                                                                         \
      f(0b01, 23, 22), f(index & 0b11, 21, 20), lrf(Vm, 16), f(index >> 2 & 1, 11);                     \
    } else {                                                                                            \
      f(0b10, 23, 22), f(index & 1, 21), rf(Vm, 16), f(index >> 1, 11);                                 \
    }                                                                                                   \
    f(op2, 15, 12), f(0, 10), rf(Vn, 5), rf(Vd, 0);                                                     \
  }

  // MUL - Vector - Scalar
  INSN(mulvs, 0, 0b1000);

#undef INSN
2956
2957 // Floating-point Reciprocal Estimate
2958 void frecpe(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) {
2959 assert(type == D || type == S, "Wrong type for frecpe");
2960 starti;
2961 f(0b010111101, 31, 23);
2962 f(type == D ? 1 : 0, 22);
2963 f(0b100001110110, 21, 10);
2964 rf(Vn, 5), rf(Vd, 0);
2965 }
2966
  // (long) {a, b} -> (a + b)
  // ADDP (scalar): sums the two 64-bit lanes of Vn into Vd.
  void addpd(FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(0b0101111011110001101110, 31, 10);
    rf(Vn, 5), rf(Vd, 0);
  }
2973
  // Floating-point AdvSIMD scalar pairwise
  // Reduces the two lanes of Vn into scalar Vd; bit 22 = precision.
#define INSN(NAME, op1, op2)                                                            \
  void NAME(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) {                 \
    starti;                                                                             \
    assert(type == D || type == S, "Wrong type for faddp/fmaxp/fminp");                 \
    f(0b0111111, 31, 25), f(op1, 24, 23),                                               \
    f(type == S ? 0 : 1, 22), f(0b11000, 21, 17), f(op2, 16, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(faddp, 0b00, 0b0110110);
  INSN(fmaxp, 0b00, 0b0111110);
  INSN(fminp, 0b01, 0b0111110);

#undef INSN
2988
  // INS (element): Vd[didx] = Vn[sidx]. The element size T positions
  // the index bits within the imm5/imm4 fields.
  void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) {
    starti;
    assert(T != Q, "invalid register variant");
    f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15);
    f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
  }
2995
  // UMOV/SMOV: move vector element Vn[idx] to general register Rd
  // (zero- or sign-extended respectively).
#define INSN(NAME, cond, op1, op2)                                      \
  void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { \
    starti;                                                             \
    assert(cond, "invalid register variant");                           \
    f(0, 31), f(op1, 30), f(0b001110000, 29, 21);                       \
    f(((idx << 1) | 1) << (int)T, 20, 16), f(op2, 15, 10);              \
    rf(Vn, 5), rf(Rd, 0);                                               \
  }

  INSN(umov, (T != Q), (T == D ? 1 : 0), 0b001111);
  INSN(smov, (T < D), 1, 0b001011);

#undef INSN
3009
  // AdvSIMD shift by immediate (vector). See the immh:immb table below.
#define INSN(NAME, opc, opc2, isSHR)                                    \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \
    starti;                                                             \
    /* The encodings for the immh:immb fields (bits 22:16) in *SHR are  \
     *   0001 xxx       8B/16B, shift = 16  - UInt(immh:immb)           \
     *   001x xxx       4H/8H,  shift = 32  - UInt(immh:immb)           \
     *   01xx xxx       2S/4S,  shift = 64  - UInt(immh:immb)           \
     *   1xxx xxx       1D/2D,  shift = 128 - UInt(immh:immb)           \
     *   (1D is RESERVED)                                               \
     * for SHL shift is calculated as:                                  \
     *   0001 xxx       8B/16B, shift = UInt(immh:immb) - 8             \
     *   001x xxx       4H/8H,  shift = UInt(immh:immb) - 16            \
     *   01xx xxx       2S/4S,  shift = UInt(immh:immb) - 32            \
     *   1xxx xxx       1D/2D,  shift = UInt(immh:immb) - 64            \
     *   (1D is RESERVED)                                               \
     */                                                                 \
    guarantee(!isSHR || (isSHR && (shift != 0)), "impossible encoding");\
    assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value");           \
    int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0)));               \
    int encodedShift = isSHR ? cVal - shift : cVal + shift;             \
    f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23),            \
    f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0);     \
  }

  INSN(shl,  0, 0b010101, /* isSHR = */ false);
  INSN(sshr, 0, 0b000001, /* isSHR = */ true);
  INSN(ushr, 1, 0b000001, /* isSHR = */ true);
  INSN(usra, 1, 0b000101, /* isSHR = */ true);
  INSN(ssra, 0, 0b000101, /* isSHR = */ true);
  INSN(sli,  1, 0b010101, /* isSHR = */ false);

#undef INSN
3042
3043 #define INSN(NAME, opc, opc2, isSHR) \
3044 void NAME(FloatRegister Vd, FloatRegister Vn, int shift){ \
3045 starti; \
3046 int encodedShift = isSHR ? 128 - shift : 64 + shift; \
3047 f(0b01, 31, 30), f(opc, 29), f(0b111110, 28, 23), \
3048 f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \
3049 }
3050
3051 INSN(shld, 0, 0b010101, /* isSHR = */ false);
3052 INSN(sshrd, 0, 0b000001, /* isSHR = */ true);
3053 INSN(ushrd, 1, 0b000001, /* isSHR = */ true);
3054
3055 #undef INSN
3056
protected:
  // Shared encoder for [us]shll/[us]shll2: widening shift left by immediate.
  // Ta is the (wider) destination arrangement, Tb the source arrangement;
  // they must be adjacent sizes (asserted below).
  void _xshll(bool is_unsigned, FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    starti;
    /* The encodings for the immh:immb fields (bits 22:16) are
     *   0001 xxx       8H, 8B/16B shift = xxx
     *   001x xxx       4S, 4H/8H  shift = xxxx
     *   01xx xxx       2D, 2S/4S  shift = xxxxx
     *   1xxx xxx       RESERVED
     */
    assert((Tb >> 1) + 1 == (Ta >> 1), "Incompatible arrangement");
    assert((1 << ((Tb>>1)+3)) > shift, "Invalid shift value");
    f(0, 31), f(Tb & 1, 30), f(is_unsigned ? 1 : 0, 29), f(0b011110, 28, 23);
    f((1 << ((Tb>>1)+3))|shift, 22, 16);   // immh:immb = esize | shift
    f(0b101001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }
3072
public:
  // Unsigned shift left long (lower half of Vn).
  void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement");
    _xshll(/* is_unsigned */ true, Vd, Ta, Vn, Tb, shift);
  }

  // Unsigned shift left long (upper half of Vn).
  void ushll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement");
    _xshll(/* is_unsigned */ true, Vd, Ta, Vn, Tb, shift);
  }

  // Unsigned extend long: alias of ushll with zero shift.
  void uxtl(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    ushll(Vd, Ta, Vn, Tb, 0);
  }

  // Signed shift left long (lower half of Vn).
  void sshll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement");
    _xshll(/* is_unsigned */ false, Vd, Ta, Vn, Tb, shift);
  }

  // Signed shift left long (upper half of Vn).
  void sshll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement");
    _xshll(/* is_unsigned */ false, Vd, Ta, Vn, Tb, shift);
  }

  // Signed extend long: alias of sshll with zero shift.
  void sxtl(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb) {
    sshll(Vd, Ta, Vn, Tb, 0);
  }
3101
  // Move from general purpose register
  // mov Vd.T[index], Rn — insert Rn into one element of Vd; other elements
  // of Vd are unchanged. Bits 20:16 encode element size and index.
  void mov(FloatRegister Vd, SIMD_RegVariant T, int index, Register Xn) {
    guarantee(T != Q, "invalid register variant");
    starti;
    f(0b01001110000, 31, 21), f(((1 << T) | (index << (T + 1))), 20, 16);
    f(0b000111, 15, 10), zrf(Xn, 5), rf(Vd, 0);
  }

  // Move to general purpose register
  // mov Rd, Vn.T[index] — alias of umov; only the S and D forms are
  // recognized as "mov" by the architecture.
  void mov(Register Xd, FloatRegister Vn, SIMD_RegVariant T, int index) {
    guarantee(T == S || T == D, "invalid register variant");
    umov(Xd, Vn, T, index);
  }
3117
protected:
  // Shared encoder for the [us]addw[v|v2] family: add a widened narrow
  // vector (Vm, arrangement Tb) to a wide vector (Vn, arrangement Ta).
  // Ta and Tb must be adjacent element sizes (asserted below); bit 30
  // (Tb & 1) distinguishes the lower-half from the upper-half ("2") form.
  void _xaddwv(bool is_unsigned, FloatRegister Vd, FloatRegister Vn, SIMD_Arrangement Ta,
               FloatRegister Vm, SIMD_Arrangement Tb) {
    starti;
    assert((Tb >> 1) + 1 == (Ta >> 1), "Incompatible arrangement");
    f(0, 31), f((int)Tb & 1, 30), f(is_unsigned ? 1 : 0, 29), f(0b01110, 28, 24);
    f((int)(Ta >> 1) - 1, 23, 22), f(1, 21), rf(Vm, 16), f(0b000100, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }
3126
3127 public:
3128 #define INSN(NAME, assertion, is_unsigned) \
3129 void NAME(FloatRegister Vd, FloatRegister Vn, SIMD_Arrangement Ta, FloatRegister Vm, \
3130 SIMD_Arrangement Tb) { \
3131 assert((assertion), "invalid arrangement"); \
3132 _xaddwv(is_unsigned, Vd, Vn, Ta, Vm, Tb); \
3133 }
3134
3135 public:
3136
3137 INSN(uaddwv, Tb == T8B || Tb == T4H || Tb == T2S, /*is_unsigned*/true)
3138 INSN(uaddwv2, Tb == T16B || Tb == T8H || Tb == T4S, /*is_unsigned*/true)
3139 INSN(saddwv, Tb == T8B || Tb == T4H || Tb == T2S, /*is_unsigned*/false)
3140 INSN(saddwv2, Tb == T16B || Tb == T8H || Tb == T4S, /*is_unsigned*/false)
3141
3142 #undef INSN
3143
3144
private:
  // Shared encoder for pmull/pmull2: polynomial multiply long.
  // Ta == T1Q selects the 64x64->128 form (size = 0b11), Ta == T8H the
  // 8x8->16 form (size = 0b00); bit 30 (Tb & 1) selects lower/upper half.
  void _pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    starti;
    assert((Ta == T1Q && (Tb == T1D || Tb == T2D)) ||
           (Ta == T8H && (Tb == T8B || Tb == T16B)), "Invalid Size specifier");
    int size = (Ta == T1Q) ? 0b11 : 0b00;
    f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size, 23, 22);
    f(1, 21), rf(Vm, 16), f(0b111000, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

public:
  // Polynomial multiply long, lower half of the source registers.
  void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    assert(Tb == T1D || Tb == T8B, "pmull assumes T1D or T8B as the second size specifier");
    _pmull(Vd, Ta, Vn, Vm, Tb);
  }

  // Polynomial multiply long, upper half of the source registers.
  void pmull2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    assert(Tb == T2D || Tb == T16B, "pmull2 assumes T2D or T16B as the second size specifier");
    _pmull(Vd, Ta, Vn, Vm, Tb);
  }
3165
  // Unsigned saturating extract narrow: narrow each element of Vn (Ta)
  // into Vd (Tb); Tb must be exactly one size step below Ta.
  void uqxtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) {
    starti;
    int size_b = (int)Tb >> 1;
    int size_a = (int)Ta >> 1;
    assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier");
    f(0, 31), f(Tb & 1, 30), f(0b101110, 29, 24), f(size_b, 23, 22);
    f(0b100001010010, 21, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Extract narrow (truncating): same size relationship as uqxtn but
  // without saturation.
  void xtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) {
    starti;
    int size_b = (int)Tb >> 1;
    int size_a = (int)Ta >> 1;
    assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier");
    f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size_b, 23, 22);
    f(0b100001001010, 21, 10), rf(Vn, 5), rf(Vd, 0);
  }
3183
  // dup Vd.T, Xs — broadcast a general-purpose register to every element
  // of Vd. Bits 20:16 encode the element size (no index for this form).
  void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs)
  {
    starti;
    assert(T != T1D, "reserved encoding");
    f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21);
    f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), zrf(Xs, 5), rf(Vd, 0);
  }

  // dup Vd.T, Vn.T[index] — broadcast one element of Vn to every element
  // of Vd. Bits 20:16 encode both the element size and the source index.
  void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0)
  {
    starti;
    assert(T != T1D, "reserved encoding");
    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21);
    f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
    f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Advanced SIMD scalar copy
  // dup Vd, Vn.T[index] — copy one element of Vn into scalar Vd.
  void dup(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int index = 0)
  {
    starti;
    assert(T != Q, "invalid size");
    f(0b01011110000, 31, 21);
    f((1 << T) | (index << (T + 1)), 20, 16);
    f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }
3210
  // AdvSIMD ZIP/UZP/TRN — permute pairs of vectors (zip = interleave,
  // uzp = de-interleave, trn = transpose); 1/2 selects even/odd elements.
  // Note the size/Q fields (bits 30 and 23:22) are emitted after the
  // register fields; f() writes bit positions, so order does not matter.
#define INSN(NAME, opcode) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1D && T != T1Q, "invalid arrangement");             \
    starti;                                                             \
    f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15);                  \
    f(opcode, 14, 12), f(0b10, 11, 10);                                 \
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);                                   \
    f(T & 1, 30), f(T >> 1, 23, 22);                                    \
  }

  INSN(uzp1, 0b001);
  INSN(trn1, 0b010);
  INSN(zip1, 0b011);
  INSN(uzp2, 0b101);
  INSN(trn2, 0b110);
  INSN(zip2, 0b111);

#undef INSN
3230
  // CRC32 instructions: Rd = CRC32(Rn, Rm[sz]).
  //   c  — 0 for CRC32 (ISO poly), 1 for CRC32C (Castagnoli poly)
  //   sf — 1 only for the X (64-bit data) variants
  //   sz — data size: 00 byte, 01 half, 10 word, 11 doubleword
#define INSN(NAME, c, sf, sz)                                           \
  void NAME(Register Rd, Register Rn, Register Rm) {                    \
    starti;                                                             \
    f(sf, 31), f(0b0011010110, 30, 21), f(0b010, 15, 13), f(c, 12);     \
    f(sz, 11, 10), rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);                    \
  }

  INSN(crc32b,  0, 0, 0b00);
  INSN(crc32h,  0, 0, 0b01);
  INSN(crc32w,  0, 0, 0b10);
  INSN(crc32x,  0, 1, 0b11);
  INSN(crc32cb, 1, 0, 0b00);
  INSN(crc32ch, 1, 0, 0b01);
  INSN(crc32cw, 1, 0, 0b10);
  INSN(crc32cx, 1, 1, 0b11);

#undef INSN
3249
  // Table vector lookup: index bytes of Vm into a table of 1-4 consecutive
  // registers starting at Vn. tbl zeroes out-of-range results; tbx leaves
  // the corresponding Vd bytes unchanged (op bit 12 selects which).
#define INSN(NAME, op)                                                  \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
    starti;                                                             \
    assert(T == T8B || T == T16B, "invalid arrangement");               \
    assert(0 < registers && registers <= 4, "invalid number of registers"); \
    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \
    f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(tbl, 0);
  INSN(tbx, 1);

#undef INSN
3264
  // AdvSIMD two-reg misc
  // In this instruction group, the 2 bits in the size field ([23:22]) may be
  // fixed or determined by the "SIMD_Arrangement T", or both. The additional
  // parameter "tmask" is a 2-bit mask used to indicate which bits in the size
  // field are determined by the SIMD_Arrangement. The bit of "tmask" should be
  // set to 1 if corresponding bit marked as "x" in the ArmARM.
  // The ASSERTION/MSG macros below supply the per-group legality check.
#define INSN(NAME, U, size, tmask, opcode)                                          \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {               \
       starti;                                                                      \
       assert((ASSERTION), MSG);                                                    \
       int op22 = (int)(T >> 1) & tmask;                                            \
       int op19 = 0b00;                                                             \
       /* Half-precision forms use a distinct encoding in bits 22 and 20:19. */     \
       if (tmask == 0b01 && (T == T4H || T == T8H)) {                               \
         op22 = 0b1;                                                                \
         op19 = 0b11;                                                               \
       }                                                                            \
       f(0, 31), f((int)T & 1, 30), f(U, 29), f(0b01110, 28, 24);                   \
       f(size | op22, 23, 22), f(1, 21), f(op19, 20, 19), f(0b00, 18, 17);          \
       f(opcode, 16, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0);                    \
 }

#define MSG "invalid arrangement"

  // Floating-point forms: half, single and double element sizes.
#define ASSERTION (T == T4H || T == T8H || T == T2S || T == T4S || T == T2D)
  INSN(fsqrt,  1, 0b10, 0b01, 0b11111);
  INSN(fabs,   0, 0b10, 0b01, 0b01111);
  INSN(fneg,   1, 0b10, 0b01, 0b01111);
  INSN(frintn, 0, 0b00, 0b01, 0b11000);
  INSN(frintm, 0, 0b00, 0b01, 0b11001);
  INSN(frintp, 0, 0b10, 0b01, 0b11000);
  INSN(fcvtas, 0, 0b00, 0b01, 0b11100);
  INSN(fcvtzs, 0, 0b10, 0b01, 0b11011);
  INSN(fcvtms, 0, 0b00, 0b01, 0b11011);
#undef ASSERTION

  // rev64 reverses elements within each 64-bit doubleword.
#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S)
  INSN(rev64, 0, 0b00, 0b11, 0b00000);
#undef ASSERTION

  // rev32 reverses elements within each 32-bit word.
#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H)
  INSN(rev32, 1, 0b00, 0b11, 0b00000);
#undef ASSERTION

  // Byte-only forms.
#define ASSERTION (T == T8B || T == T16B)
  INSN(rev16, 0, 0b00, 0b11, 0b00001);
  INSN(rbit,  1, 0b01, 0b00, 0b00101);
#undef ASSERTION

#undef MSG

#undef INSN
3316
  // AdvSIMD compare with zero (vector)
  // Each element of Vd is set to all-ones if (Vn element <cond> 0), else
  // all-zeros. cond_op packs the U bit (bit 29) and the 2-bit opcode.
  void cm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
    starti;
    assert(T != T1Q && T != T1D, "invalid arrangement");
    int cond_op;
    switch (cond) {
      case EQ: cond_op = 0b001; break;
      case GE: cond_op = 0b100; break;
      case GT: cond_op = 0b000; break;
      case LE: cond_op = 0b101; break;
      case LT: cond_op = 0b010; break;
      default:
        ShouldNotReachHere();
        break;
    }

    f(0, 31), f((int)T & 1, 30), f((cond_op >> 2) & 1, 29);
    f(0b01110, 28, 24), f((int)T >> 1, 23, 22), f(0b10000010, 21, 14);
    f(cond_op & 0b11, 13, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0);
  }
3337
  // AdvSIMD Floating-point compare with zero (vector)
  // Floating-point analogue of cm(): sets each element of Vd to a mask of
  // (Vn element <cond> 0.0). Note the bit packing differs from cm():
  // here bit 29 takes the low bit of cond_op and bits 13:12 the high bits.
  void fcm(Condition cond, FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
    starti;
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement");
    int cond_op;
    switch (cond) {
      case EQ: cond_op = 0b010; break;
      case GT: cond_op = 0b000; break;
      case GE: cond_op = 0b001; break;
      case LE: cond_op = 0b011; break;
      case LT: cond_op = 0b100; break;
      default:
        ShouldNotReachHere();
        break;
    }

    f(0, 31), f((int)T & 1, 30), f(cond_op & 1, 29), f(0b011101, 28, 23);
    f(((int)(T >> 1) & 1), 22), f(0b10000011, 21, 14);
    f((cond_op >> 1) & 0b11, 13, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0);
  }
3358
  // ext Vd.T, Vn.T, Vm.T, #index — extract a byte-wise window from the
  // concatenation Vm:Vn, starting at byte `index` of Vn.
  void ext(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index)
  {
    starti;
    assert(T == T8B || T == T16B, "invalid arrangement");
    assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value");
    f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21);
    rf(Vm, 16), f(0, 15), f(index, 14, 11);
    f(0, 10), rf(Vn, 5), rf(Vd, 0);
  }
3368
  // SVE arithmetic - unpredicated
  // Element-wise Zd = Zn <op> Zm for any element size except Q;
  // T is encoded directly in the size field (bits 23:22).
#define INSN(NAME, opcode)                                              \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \
    starti;                                                             \
    assert(T != Q, "invalid register variant");                         \
    f(0b00000100, 31, 24), f(T, 23, 22), f(1, 21),                      \
    rf(Zm, 16), f(0, 15, 13), f(opcode, 12, 10), rf(Zn, 5), rf(Zd, 0);  \
  }
  INSN(sve_add,   0b000);
  INSN(sve_sub,   0b001);
  INSN(sve_sqadd, 0b100);  // signed saturating add
  INSN(sve_sqsub, 0b110);  // signed saturating subtract
  INSN(sve_uqadd, 0b101);  // unsigned saturating add
  INSN(sve_uqsub, 0b111);  // unsigned saturating subtract
#undef INSN
3384
  // SVE integer add/subtract immediate (unpredicated)
  // Zd = Zd <op> imm8, optionally with the immediate left-shifted by 8
  // (sh bit 13) when a multiple of 256 is passed for H/S/D elements.
#define INSN(NAME, op)                                                  \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, unsigned imm8) {       \
    starti;                                                             \
    /* The immediate is an unsigned value in the range 0 to 255, and    \
     * for element width of 16 bits or higher it may also be a          \
     * positive multiple of 256 in the range 256 to 65280.              \
     */                                                                 \
    assert(T != Q, "invalid size");                                     \
    int sh = 0;                                                         \
    if (imm8 <= 0xff) {                                                 \
      sh = 0;                                                           \
    } else if (T != B && imm8 <= 0xff00 && (imm8 & 0xff) == 0) {        \
      sh = 1;                                                           \
      imm8 = (imm8 >> 8);                                               \
    } else {                                                            \
      guarantee(false, "invalid immediate");                            \
    }                                                                   \
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b10000, 21, 17);            \
    f(op, 16, 14), f(sh, 13), f(imm8, 12, 5), rf(Zd, 0);                \
  }

  INSN(sve_add, 0b011);
  INSN(sve_sub, 0b111);
#undef INSN
3410
  // SVE floating-point arithmetic - unpredicated
  // Element-wise Zd = Zn <op> Zm for half, single or double elements.
#define INSN(NAME, opcode)                                              \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \
    starti;                                                             \
    assert(T == H || T == S || T == D, "invalid register variant");     \
    f(0b01100101, 31, 24), f(T, 23, 22), f(0, 21),                      \
    rf(Zm, 16), f(0, 15, 13), f(opcode, 12, 10), rf(Zn, 5), rf(Zd, 0);  \
  }

  INSN(sve_fadd, 0b000);
  INSN(sve_fmul, 0b010);
  INSN(sve_fsub, 0b001);
#undef INSN
3424
private:
  // Common encoder for SVE predicated one-source instructions:
  // emits op24 (bits 31:24), the size field, op13 (bits 21:13), then the
  // governing predicate and the two register operands.
  void sve_predicate_reg_insn(unsigned op24, unsigned op13,
                              FloatRegister Zd_or_Vd, SIMD_RegVariant T,
                              PRegister Pg, FloatRegister Zn_or_Vn) {
    starti;
    f(op24, 31, 24), f(T, 23, 22), f(op13, 21, 13);
    pgrf(Pg, 10), rf(Zn_or_Vn, 5), rf(Zd_or_Vd, 0);
  }

  // Compute the split tszh (2 bits) / tszl:imm3 (5 bits) encoding of an
  // SVE shift-by-immediate amount; shared by the predicated and
  // unpredicated shift-immediate emitters below.
  void sve_shift_imm_encoding(SIMD_RegVariant T, int shift, bool isSHR,
                              int& tszh, int& tszl_imm) {
    /* The encodings for the tszh:tszl:imm3 fields
     * for shift right is calculated as:
     *   0001 xxx       B, shift = 16  - UInt(tszh:tszl:imm3)
     *   001x xxx       H, shift = 32  - UInt(tszh:tszl:imm3)
     *   01xx xxx       S, shift = 64  - UInt(tszh:tszl:imm3)
     *   1xxx xxx       D, shift = 128 - UInt(tszh:tszl:imm3)
     * for shift left is calculated as:
     *   0001 xxx       B, shift = UInt(tszh:tszl:imm3) - 8
     *   001x xxx       H, shift = UInt(tszh:tszl:imm3) - 16
     *   01xx xxx       S, shift = UInt(tszh:tszl:imm3) - 32
     *   1xxx xxx       D, shift = UInt(tszh:tszl:imm3) - 64
     */
    assert(T != Q, "Invalid register variant");
    if (isSHR) {
      // Right shifts: 1..esize inclusive.
      assert(((1 << (T + 3)) >= shift) && (shift > 0) , "Invalid shift value");
    } else {
      // Left shifts: 0..esize-1.
      assert(((1 << (T + 3)) > shift) && (shift >= 0) , "Invalid shift value");
    }
    int cVal = (1 << ((T + 3) + (isSHR ? 1 : 0)));
    int encodedShift = isSHR ? cVal - shift : cVal + shift;
    tszh = encodedShift >> 5;
    tszl_imm = encodedShift & 0x1f;
  }
3459
public:

  // SVE integer arithmetic - predicate
  // Predicated (Pg-governed) integer operations; depending on the opcode
  // the destination is a vector (destructive Zdn), a copy target Zd, or a
  // scalar reduction result Vd — see the per-instruction comments.
#define INSN(NAME, op1, op2)                                                            \
  void NAME(FloatRegister Zdn_or_Zd_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Znm_or_Vn) { \
    assert(T != Q, "invalid register variant");                                         \
    sve_predicate_reg_insn(op1, op2, Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn);                \
  }

  INSN(sve_abs,   0b00000100, 0b010110101); // vector abs, unary
  INSN(sve_add,   0b00000100, 0b000000000); // vector add
  INSN(sve_and,   0b00000100, 0b011010000); // vector and
  INSN(sve_andv,  0b00000100, 0b011010001); // bitwise and reduction to scalar
  INSN(sve_asr,   0b00000100, 0b010000100); // vector arithmetic shift right
  INSN(sve_bic,   0b00000100, 0b011011000); // vector bitwise clear
  INSN(sve_clz,   0b00000100, 0b011001101); // vector count leading zero bits
  INSN(sve_cnt,   0b00000100, 0b011010101); // count non-zero bits
  INSN(sve_cpy,   0b00000101, 0b100000100); // copy scalar to each active vector element
  INSN(sve_eor,   0b00000100, 0b011001000); // vector eor
  INSN(sve_eorv,  0b00000100, 0b011001001); // bitwise xor reduction to scalar
  INSN(sve_lsl,   0b00000100, 0b010011100); // vector logical shift left
  INSN(sve_lsr,   0b00000100, 0b010001100); // vector logical shift right
  INSN(sve_mul,   0b00000100, 0b010000000); // vector mul
  INSN(sve_neg,   0b00000100, 0b010111101); // vector neg, unary
  INSN(sve_not,   0b00000100, 0b011110101); // bitwise invert vector, unary
  INSN(sve_orr,   0b00000100, 0b011000000); // vector or
  INSN(sve_orv,   0b00000100, 0b011000001); // bitwise or reduction to scalar
  INSN(sve_smax,  0b00000100, 0b001000000); // signed maximum vectors
  INSN(sve_smaxv, 0b00000100, 0b001000001); // signed maximum reduction to scalar
  INSN(sve_smin,  0b00000100, 0b001010000); // signed minimum vectors
  INSN(sve_sminv, 0b00000100, 0b001010001); // signed minimum reduction to scalar
  INSN(sve_splice,0b00000101, 0b101100100); // splice two vectors under predicate control, destructive
  INSN(sve_sub,   0b00000100, 0b000001000); // vector sub
  INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
  INSN(sve_umax,  0b00000100, 0b001001000); // unsigned maximum vectors
  INSN(sve_umaxv, 0b00000100, 0b001001001); // unsigned maximum reduction to scalar
  INSN(sve_umin,  0b00000100, 0b001011000); // unsigned minimum vectors
  INSN(sve_uminv, 0b00000100, 0b001011001); // unsigned minimum reduction to scalar
#undef INSN
3499
  // SVE floating-point arithmetic - predicate
  // Predicated floating-point operations for H/S/D element sizes; as above,
  // the destination may be a destructive Zdn or a scalar reduction Vd.
#define INSN(NAME, op1, op2)                                                          \
  void NAME(FloatRegister Zd_or_Zdn_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn_or_Zm) { \
    assert(T == H || T == S || T == D, "invalid register variant");                   \
    sve_predicate_reg_insn(op1, op2, Zd_or_Zdn_or_Vd, T, Pg, Zn_or_Zm);               \
  }

  INSN(sve_fabd,   0b01100101, 0b001000100); // floating-point absolute difference
  INSN(sve_fabs,   0b00000100, 0b011100101);
  INSN(sve_fadd,   0b01100101, 0b000000100);
  INSN(sve_fadda,  0b01100101, 0b011000001); // add strictly-ordered reduction to scalar Vd
  INSN(sve_fdiv,   0b01100101, 0b001101100);
  INSN(sve_fmax,   0b01100101, 0b000110100); // floating-point maximum
  INSN(sve_fmaxv,  0b01100101, 0b000110001); // floating-point maximum recursive reduction to scalar
  INSN(sve_fmin,   0b01100101, 0b000111100); // floating-point minimum
  INSN(sve_fminv,  0b01100101, 0b000111001); // floating-point minimum recursive reduction to scalar
  INSN(sve_fmul,   0b01100101, 0b000010100);
  INSN(sve_fneg,   0b00000100, 0b011101101);
  INSN(sve_frintm, 0b01100101, 0b000010101); // floating-point round to integral value, toward minus infinity
  INSN(sve_frintn, 0b01100101, 0b000000101); // floating-point round to integral value, nearest with ties to even
  INSN(sve_frinta, 0b01100101, 0b000100101); // floating-point round to integral value, nearest with ties to away
  INSN(sve_frintp, 0b01100101, 0b000001101); // floating-point round to integral value, toward plus infinity
  INSN(sve_fsqrt,  0b01100101, 0b001101101);
  INSN(sve_fsub,   0b01100101, 0b000001100);
#undef INSN
3525
  // SVE multiple-add/sub - predicated
  // Fused multiply-accumulate forms: Zda is both accumulator and
  // destination; Pg governs which elements are updated. The exact
  // operand roles for each opcode are spelled out per instruction below.
#define INSN(NAME, op0, op1, op2)                                                             \
  void NAME(FloatRegister Zda, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn, FloatRegister Zm) { \
    starti;                                                                                   \
    assert(T != Q, "invalid size");                                                           \
    f(op0, 31, 24), f(T, 23, 22), f(op1, 21), rf(Zm, 16);                                     \
    f(op2, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zda, 0);                                      \
  }

  INSN(sve_fmla,  0b01100101, 1, 0b000); // floating-point fused multiply-add, writing addend: Zda = Zda + Zn * Zm
  INSN(sve_fmls,  0b01100101, 1, 0b001); // floating-point fused multiply-subtract: Zda = Zda + -Zn * Zm
  INSN(sve_fnmla, 0b01100101, 1, 0b010); // floating-point negated fused multiply-add: Zda = -Zda + -Zn * Zm
  INSN(sve_fnmls, 0b01100101, 1, 0b011); // floating-point negated fused multiply-subtract: Zda = -Zda + Zn * Zm
  INSN(sve_fmad,  0b01100101, 1, 0b100); // floating-point fused multiply-add, writing multiplicand: Zda = Zm + Zda * Zn
  INSN(sve_fmsb,  0b01100101, 1, 0b101); // floating-point fused multiply-subtract, writing multiplicand: Zda = Zm + -Zda * Zn
  INSN(sve_fnmad, 0b01100101, 1, 0b110); // floating-point negated fused multiply-add, writing multiplicand: Zda = -Zm + -Zda * Zn
  INSN(sve_fnmsb, 0b01100101, 1, 0b111); // floating-point negated fused multiply-subtract, writing multiplicand: Zda = -Zm + Zda * Zn
  INSN(sve_mla,   0b00000100, 0, 0b010); // multiply-add, writing addend: Zda = Zda + Zn*Zm
  INSN(sve_mls,   0b00000100, 0, 0b011); // multiply-subtract, writing addend: Zda = Zda + -Zn*Zm
#undef INSN
3546
  // SVE bitwise logical - unpredicated
  // Bitwise ops are size-agnostic, so no element-size parameter; the
  // opcode lives in bits 23:22.
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Zd, FloatRegister Zn, FloatRegister Zm) {     \
    starti;                                                             \
    f(0b00000100, 31, 24), f(opc, 23, 22), f(1, 21),                    \
    rf(Zm, 16), f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);              \
  }
  INSN(sve_and, 0b00);
  INSN(sve_eor, 0b10);
  INSN(sve_orr, 0b01);
  INSN(sve_bic, 0b11);
#undef INSN
3559
  // SVE bitwise logical with immediate (unpredicated)
  // Zd = Zd <op> imm, where imm must be expressible as an SVE logical
  // immediate (bitmask pattern); encode_sve_logical_immediate() validates
  // and produces the 13-bit encoded form placed in bits 17:5.
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, uint64_t imm) {        \
    starti;                                                             \
    unsigned elembits = regVariant_to_elemBits(T);                      \
    uint32_t val = encode_sve_logical_immediate(elembits, imm);         \
    f(0b00000101, 31, 24), f(opc, 23, 22), f(0b0000, 21, 18);           \
    f(val, 17, 5), rf(Zd, 0);                                           \
  }
  INSN(sve_and, 0b10);
  INSN(sve_eor, 0b01);
  INSN(sve_orr, 0b00);
#undef INSN
3573
  // SVE shift immediate - unpredicated
  // Zd = Zn <shift> #shift; the shift amount and element size are packed
  // into tszh (23:22) and tszl:imm3 (20:16) by sve_shift_imm_encoding().
#define INSN(NAME, opc, isSHR)                                                  \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, int shift) { \
    starti;                                                                     \
    int tszh, tszl_imm;                                                         \
    sve_shift_imm_encoding(T, shift, isSHR, tszh, tszl_imm);                    \
    f(0b00000100, 31, 24);                                                      \
    f(tszh, 23, 22), f(1,21), f(tszl_imm, 20, 16);                              \
    f(0b100, 15, 13), f(opc, 12, 10), rf(Zn, 5), rf(Zd, 0);                     \
  }

  INSN(sve_asr, 0b100, /* isSHR = */ true);
  INSN(sve_lsl, 0b111, /* isSHR = */ false);
  INSN(sve_lsr, 0b101, /* isSHR = */ true);
#undef INSN
3589
  // SVE bitwise shift by immediate (predicated)
  // Destructive Zdn = Zdn <shift> #shift for active elements under Pg.
  // Same tszh/tszl:imm3 packing as the unpredicated form, but tszl:imm3
  // goes in bits 9:5 here.
#define INSN(NAME, opc, isSHR)                                                  \
  void NAME(FloatRegister Zdn, SIMD_RegVariant T, PRegister Pg, int shift) {    \
    starti;                                                                     \
    int tszh, tszl_imm;                                                         \
    sve_shift_imm_encoding(T, shift, isSHR, tszh, tszl_imm);                    \
    f(0b00000100, 31, 24), f(tszh, 23, 22), f(0b00, 21, 20), f(opc, 19, 16);    \
    f(0b100, 15, 13), pgrf(Pg, 10), f(tszl_imm, 9, 5), rf(Zdn, 0);              \
  }

  INSN(sve_asr, 0b0000, /* isSHR = */ true);
  INSN(sve_lsl, 0b0011, /* isSHR = */ false);
  INSN(sve_lsr, 0b0001, /* isSHR = */ true);
#undef INSN
3604
private:

  // Scalar base + immediate index
  // Common encoder for SVE contiguous load/store with a 4-bit signed
  // vector-length-scaled immediate offset (bits 19:16).
  void sve_ld_st1(FloatRegister Zt, Register Xn, int imm, PRegister Pg,
              SIMD_RegVariant T, int op1, int type, int op2) {
    starti;
    assert_cond(T >= type);   // element size must not be narrower than memory size
    f(op1, 31, 25), f(type, 24, 23), f(T, 22, 21);
    f(0, 20), sf(imm, 19, 16), f(op2, 15, 13);
    pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0);
  }

  // Scalar base + scalar index
  // As above but with a register offset Xm (bits 20:16).
  void sve_ld_st1(FloatRegister Zt, Register Xn, Register Xm, PRegister Pg,
              SIMD_RegVariant T, int op1, int type, int op2) {
    starti;
    assert_cond(T >= type);
    f(op1, 31, 25), f(type, 24, 23), f(T, 22, 21);
    rf(Xm, 16), f(op2, 15, 13);
    pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0);
  }

  // Dispatch on the Address mode to one of the two encoders above.
  void sve_ld_st1(FloatRegister Zt, PRegister Pg,
              SIMD_RegVariant T, const Address &a,
              int op1, int type, int imm_op2, int scalar_op2) {
    switch (a.getMode()) {
    case Address::base_plus_offset:
      sve_ld_st1(Zt, a.base(), checked_cast<int>(a.offset()), Pg, T, op1, type, imm_op2);
      break;
    case Address::base_plus_offset_reg:
      sve_ld_st1(Zt, a.base(), a.index(), Pg, T, op1, type, scalar_op2);
      break;
    default:
      ShouldNotReachHere();
    }
  }
3641
public:

  // SVE contiguous load/store
  // ld1/st1 of B/H/W/D elements; `type` fixes the memory element size
  // and T the register element size (T >= type, checked in sve_ld_st1).
#define INSN(NAME, op1, type, imm_op2, scalar_op2)                                  \
  void NAME(FloatRegister Zt, SIMD_RegVariant T, PRegister Pg, const Address &a) {  \
    assert(T != Q, "invalid register variant");                                     \
    sve_ld_st1(Zt, Pg, T, a, op1, type, imm_op2, scalar_op2);                       \
  }

  INSN(sve_ld1b, 0b1010010, 0b00, 0b101, 0b010);
  INSN(sve_st1b, 0b1110010, 0b00, 0b111, 0b010);
  INSN(sve_ld1h, 0b1010010, 0b01, 0b101, 0b010);
  INSN(sve_st1h, 0b1110010, 0b01, 0b111, 0b010);
  INSN(sve_ld1w, 0b1010010, 0b10, 0b101, 0b010);
  INSN(sve_st1w, 0b1110010, 0b10, 0b111, 0b010);
  INSN(sve_ld1d, 0b1010010, 0b11, 0b101, 0b010);
  INSN(sve_st1d, 0b1110010, 0b11, 0b111, 0b010);
#undef INSN
3660
  // Gather/scatter load/store (SVE) - scalar plus vector
  // Zt elements are loaded from / stored to [Xn + Zm-element offsets],
  // governed by Pg; the offset interpretation is fixed per instruction
  // (see the comments at each instantiation).
#define INSN(NAME, op1, type, op2, op3)                                         \
  void NAME(FloatRegister Zt, PRegister Pg, Register Xn, FloatRegister Zm) {    \
    starti;                                                                     \
    f(op1, 31, 25), f(type, 24, 23), f(op2, 22, 21), rf(Zm, 16);                \
    f(op3, 15, 13), pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0);                        \
  }
  // SVE 32-bit gather load words (scalar plus 32-bit scaled offsets)
  INSN(sve_ld1w_gather,  0b1000010, 0b10, 0b01, 0b010);
  // SVE 64-bit gather load (scalar plus 32-bit unpacked scaled offsets)
  INSN(sve_ld1d_gather,  0b1100010, 0b11, 0b01, 0b010);
  // SVE 32-bit scatter store (scalar plus 32-bit scaled offsets)
  INSN(sve_st1w_scatter, 0b1110010, 0b10, 0b11, 0b100);
  // SVE 64-bit scatter store (scalar plus unpacked 32-bit scaled offsets)
  INSN(sve_st1d_scatter, 0b1110010, 0b11, 0b01, 0b100);
#undef INSN
3677
  // SVE load/store - unpredicated
  // Whole-register LDR/STR of a Z register. The 9-bit signed
  // vector-length-scaled offset is split into imm9h (21:16) and
  // imm9l (12:10).
#define INSN(NAME, op1)                                                 \
  void NAME(FloatRegister Zt, const Address &a)  {                      \
    starti;                                                             \
    assert(a.index() == noreg, "invalid address variant");              \
    f(op1, 31, 29), f(0b0010110, 28, 22), sf(a.offset() >> 3, 21, 16),  \
    f(0b010, 15, 13), f(a.offset() & 0x7, 12, 10), srf(a.base(), 5), rf(Zt, 0); \
  }

  INSN(sve_ldr, 0b100); // LDR (vector)
  INSN(sve_str, 0b111); // STR (vector)
#undef INSN
3690
  // SVE stack frame adjustment
  // Xd = Xn + imm6 * VL (addvl) or imm6 * PL (addpl), where VL/PL are the
  // vector/predicate register lengths in bytes; imm6 is signed.
#define INSN(NAME, op)                                                  \
  void NAME(Register Xd, Register Xn, int imm6) {                       \
    starti;                                                             \
    f(0b000001000, 31, 23), f(op, 22, 21);                              \
    srf(Xn, 16), f(0b01010, 15, 11), sf(imm6, 10, 5), srf(Xd, 0);       \
  }

  INSN(sve_addvl, 0b01); // Add multiple of vector register size to scalar register
  INSN(sve_addpl, 0b11); // Add multiple of predicate register size to scalar register
#undef INSN
3702
  // SVE inc/dec register by element count
  // Xdn +/-= imm4 * (number of T-sized elements matching `pattern`);
  // the default pattern 0b11111 (ALL) counts all elements, and imm4 is
  // encoded as imm4 - 1 (multiplier 1..16).
#define INSN(NAME, op)                                                                \
  void NAME(Register Xdn, SIMD_RegVariant T, unsigned imm4 = 1, int pattern = 0b11111) { \
    starti;                                                                           \
    assert(T != Q, "invalid size");                                                   \
    f(0b00000100,31, 24), f(T, 23, 22), f(0b11, 21, 20);                              \
    f(imm4 - 1, 19, 16), f(0b11100, 15, 11), f(op, 10), f(pattern, 9, 5), rf(Xdn, 0); \
  }

  INSN(sve_inc, 0);
  INSN(sve_dec, 1);
#undef INSN
3715
  // SVE predicate logical operations
  // Pd = Pn <op> Pm, governed by Pg; the "s" variants also set the
  // condition flags (op1 bit 22).
#define INSN(NAME, op1, op2, op3)                                       \
  void NAME(PRegister Pd, PRegister Pg, PRegister Pn, PRegister Pm) {   \
    starti;                                                             \
    f(0b00100101, 31, 24), f(op1, 23, 22), f(0b00, 21, 20);             \
    prf(Pm, 16), f(0b01, 15, 14), prf(Pg, 10), f(op2, 9);               \
    prf(Pn, 5), f(op3, 4), prf(Pd, 0);                                  \
  }

  INSN(sve_and,  0b00, 0b0, 0b0);
  INSN(sve_ands, 0b01, 0b0, 0b0);
  INSN(sve_eor,  0b00, 0b1, 0b0);
  INSN(sve_eors, 0b01, 0b1, 0b0);
  INSN(sve_orr,  0b10, 0b0, 0b0);
  INSN(sve_orrs, 0b11, 0b0, 0b0);
  INSN(sve_bic,  0b00, 0b0, 0b1);
#undef INSN
3733
  // SVE increment register by predicate count
  // rd += number of active (true) elements of size T in pg.
  void sve_incp(const Register rd, SIMD_RegVariant T, PRegister pg) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b1011001000100, 21, 9),
    prf(pg, 5), rf(rd, 0);
  }

  // SVE broadcast general-purpose register to vector elements (unpredicated)
  // Zd.T = broadcast(Rn); Rn may be SP (srf).
  void sve_dup(FloatRegister Zd, SIMD_RegVariant T, Register Rn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b100000001110, 21, 10);
    srf(Rn, 5), rf(Zd, 0);
  }
3749
  // SVE broadcast signed immediate to vector elements (unpredicated)
  // Zd.T = broadcast(imm8). The immediate is either a plain signed byte,
  // or (for H/S/D elements) a multiple of 256 encoded with the sh bit set.
  void sve_dup(FloatRegister Zd, SIMD_RegVariant T, int imm8) {
    starti;
    assert(T != Q, "invalid size");
    int sh = 0;
    if (imm8 <= 127 && imm8 >= -128) {
      sh = 0;
    } else if (T != B && imm8 <= 32512 && imm8 >= -32768 && (imm8 & 0xff) == 0) {
      sh = 1;         // shifted form: value is imm8 * 256
      imm8 = (imm8 >> 8);
    } else {
      guarantee(false, "invalid immediate");
    }
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b11100011, 21, 14);
    f(sh, 13), sf(imm8, 12, 5), rf(Zd, 0);
  }
3766
  // SVE predicate test
  // Sets the condition flags from the active elements of Pn under Pg.
  void sve_ptest(PRegister Pg, PRegister Pn) {
    starti;
    f(0b001001010101000011, 31, 14), prf(Pg, 10), f(0, 9), prf(Pn, 5), f(0, 4, 0);
  }

  // SVE predicate initialize
  // pd = true for elements selected by `pattern` (default ALL = 0b11111)
  // at element size `esize`, false elsewhere.
  void sve_ptrue(PRegister pd, SIMD_RegVariant esize, int pattern = 0b11111) {
    starti;
    f(0b00100101, 31, 24), f(esize, 23, 22), f(0b011000111000, 21, 10);
    f(pattern, 9, 5), f(0b0, 4), prf(pd, 0);
  }

  // SVE predicate zero
  // pd = all-false.
  void sve_pfalse(PRegister pd) {
    starti;
    f(0b00100101, 31, 24), f(0b00, 23, 22), f(0b011000111001, 21, 10);
    f(0b000000, 9, 4), prf(pd, 0);
  }
3786
  // SVE load/store predicate register
  // Whole-register LDR/STR of a P register; same split 9-bit signed
  // offset layout as the Z-register form (imm9h in 21:16, imm9l in 12:10).
#define INSN(NAME, op1)                                                 \
  void NAME(PRegister Pt, const Address &a)  {                          \
    starti;                                                             \
    assert(a.index() == noreg, "invalid address variant");              \
    f(op1, 31, 29), f(0b0010110, 28, 22), sf(a.offset() >> 3, 21, 16),  \
    f(0b000, 15, 13), f(a.offset() & 0x7, 12, 10), srf(a.base(), 5),    \
    f(0, 4), prf(Pt, 0);                                                \
  }

  INSN(sve_ldr, 0b100); // LDR (predicate)
  INSN(sve_str, 0b111); // STR (predicate)
#undef INSN
3800
  // SVE move predicate register
  // Copies Pn into Pd.  Encoded with Pn in the governing-predicate and both
  // source positions, the ISA alias form of a predicate move.
  void sve_mov(PRegister Pd, PRegister Pn) {
    starti;
    f(0b001001011000, 31, 20), prf(Pn, 16), f(0b01, 15, 14), prf(Pn, 10);
    f(0, 9), prf(Pn, 5), f(0, 4), prf(Pd, 0);
  }
3807
  // SVE copy general-purpose register to vector elements (predicated)
  // CPY Zd.T, Pg/M, Rn -- active elements receive the low T-sized part of
  // Rn; this scalar form is merging per the ISA.
  void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, Register Rn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b101000101, 21, 13);
    pgrf(Pg, 10), srf(Rn, 5), rf(Zd, 0);
  }
3815
private:
  // Shared encoding for the predicated CPY/FCPY immediate forms.
  // Integer form (isFloat == false): imm8 is either an 8-bit signed value,
  // or (except for byte elements) a multiple of 256 in [-32768, 32512],
  // encoded as (imm8 >> 8) with the shift bit set.
  // Floating-point form (isFloat == true): imm8 is the packed 8-bit FP
  // immediate; it must already fit in 8 bits and no shift is permitted.
  void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8,
               bool isMerge, bool isFloat) {
    starti;
    assert(T != Q, "invalid size");
    int sh = 0;
    if (isFloat) {
      assert(T != B, "invalid size");
      assert((imm8 >> 8) == 0, "invalid immediate");
      sh = 0;
    } else if (imm8 <= 127 && imm8 >= -128) {
      sh = 0;
    } else if (T != B && imm8 <= 32512 && imm8 >= -32768 && (imm8 & 0xff) == 0) {
      sh = 1;
      imm8 = (imm8 >> 8);
    } else {
      guarantee(false, "invalid immediate");
    }
    int m = isMerge ? 1 : 0;  // merging (1) vs zeroing (0) predication
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b01, 21, 20);
    prf(Pg, 16), f(isFloat ? 1 : 0, 15), f(m, 14), f(sh, 13), f(imm8 & 0xff, 12, 5), rf(Zd, 0);
  }
3838
public:
  // SVE copy signed integer immediate to vector elements (predicated)
  void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, int imm8, bool isMerge) {
    sve_cpy(Zd, T, Pg, imm8, isMerge, /*isFloat*/false);
  }
  // SVE copy floating-point immediate to vector elements (predicated)
  // d is packed into the 8-bit FP immediate encoding; always merging.
  void sve_cpy(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, double d) {
    sve_cpy(Zd, T, Pg, checked_cast<uint8_t>(pack(d)), /*isMerge*/true, /*isFloat*/true);
  }
3848
  // SVE conditionally select elements from two vectors
  // SEL Zd.T, Pg, Zn.T, Zm.T -- Zd gets Zn where Pg is active, Zm elsewhere.
  void sve_sel(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg,
               FloatRegister Zn, FloatRegister Zm) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
    f(0b11, 15, 14), prf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }
3857
3858 // SVE Permute Vector - Extract
3859 void sve_ext(FloatRegister Zdn, FloatRegister Zm, int imm8) {
3860 starti;
3861 f(0b00000101001, 31, 21), f(imm8 >> 3, 20, 16), f(0b000, 15, 13);
3862 f(imm8 & 0b111, 12, 10), rf(Zm, 5), rf(Zdn, 0);
3863 }
3864
  // SVE Integer/Floating-Point Compare - Vectors
  // Compares Zn with Zm element-wise under predicate Pg and writes the
  // per-element results to Pd.  op2 == 0b11 marks the floating-point
  // absolute-value compare (sve_fac), which only supports GT/GE.  HI/HS
  // select the unsigned integer forms and are invalid for sve_fcm.
#define INSN(NAME, op1, op2, fp)                                                       \
  void NAME(Condition cond, PRegister Pd, SIMD_RegVariant T, PRegister Pg,             \
            FloatRegister Zn, FloatRegister Zm) {                                      \
    starti;                                                                            \
    assert(T != Q, "invalid size");                                                    \
    bool is_absolute = op2 == 0b11;                                                    \
    if (fp == 1) {                                                                     \
      assert(T != B, "invalid size");                                                  \
      if (is_absolute) {                                                               \
        assert(cond == GT || cond == GE, "invalid condition for fac");                 \
      } else {                                                                         \
        assert(cond != HI && cond != HS, "invalid condition for fcm");                 \
      }                                                                                \
    }                                                                                  \
    /* cond_op packs the ne/cmph-style opcode bits; bit 0 goes to bit 4 */             \
    /* of the instruction, bits 3-1 to bits 15-13. */                                  \
    int cond_op;                                                                       \
    switch(cond) {                                                                     \
    case EQ: cond_op = (op2 << 2) | 0b10; break;                                       \
    case NE: cond_op = (op2 << 2) | 0b11; break;                                       \
    case GE: cond_op = (op2 << 2) | (is_absolute ? 0b01 : 0b00); break;                \
    case GT: cond_op = (op2 << 2) | (is_absolute ? 0b11 : 0b01); break;                \
    case HI: cond_op = 0b0001; break;                                                  \
    case HS: cond_op = 0b0000; break;                                                  \
    default:                                                                           \
      ShouldNotReachHere();                                                            \
    }                                                                                  \
    f(op1, 31, 24), f(T, 23, 22), f(0, 21), rf(Zm, 16), f((cond_op >> 1) & 7, 15, 13); \
    pgrf(Pg, 10), rf(Zn, 5), f(cond_op & 1, 4), prf(Pd, 0);                            \
  }

  INSN(sve_cmp, 0b00100100, 0b10, 0); // Integer compare vectors
  INSN(sve_fcm, 0b01100101, 0b01, 1); // Floating-point compare vectors
  INSN(sve_fac, 0b01100101, 0b11, 1); // Floating-point absolute compare vectors
#undef INSN
3899
3900 private:
3901 // Convert Assembler::Condition to op encoding - used by sve integer compare encoding
3902 static int assembler_cond_to_sve_op(Condition cond, bool &is_unsigned) {
3903 if (cond == HI || cond == HS || cond == LO || cond == LS) {
3904 is_unsigned = true;
3905 } else {
3906 is_unsigned = false;
3907 }
3908
3909 switch (cond) {
3910 case HI:
3911 case GT:
3912 return 0b0001;
3913 case HS:
3914 case GE:
3915 return 0b0000;
3916 case LO:
3917 case LT:
3918 return 0b0010;
3919 case LS:
3920 case LE:
3921 return 0b0011;
3922 case EQ:
3923 return 0b1000;
3924 case NE:
3925 return 0b1001;
3926 default:
3927 ShouldNotReachHere();
3928 return -1;
3929 }
3930 }
3931
public:
  // SVE Integer Compare - 5 bits signed imm and 7 bits unsigned imm
  // Compares each element of Zn with the immediate under predicate Pg.
  // Signed conditions take a signed 5-bit immediate; the unsigned
  // conditions (HI/HS/LO/LS) take an unsigned 7-bit immediate and use a
  // different opcode/field layout.
  void sve_cmp(Condition cond, PRegister Pd, SIMD_RegVariant T,
               PRegister Pg, FloatRegister Zn, int imm) {
    starti;
    assert(T != Q, "invalid size");
    bool is_unsigned = false;
    int cond_op = assembler_cond_to_sve_op(cond, is_unsigned);
    f(is_unsigned ? 0b00100100 : 0b00100101, 31, 24), f(T, 23, 22);
    f(is_unsigned ? 0b1 : 0b0, 21);
    if (is_unsigned) {
      // Unsigned 7-bit immediate occupies bits 20-14.
      f(imm, 20, 14), f((cond_op >> 1) & 0x1, 13);
    } else {
      // Signed 5-bit immediate occupies bits 20-16.
      sf(imm, 20, 16), f((cond_op >> 1) & 0x7, 15, 13);
    }
    pgrf(Pg, 10), rf(Zn, 5), f(cond_op & 0x1, 4), prf(Pd, 0);
  }
3949
  // SVE Floating-point compare vector with zero
  // FCM<cond> Pd.T, Pg/Z, Zn.T, #0.0 -- only a zero immediate is encodable.
  void sve_fcm(Condition cond, PRegister Pd, SIMD_RegVariant T,
               PRegister Pg, FloatRegister Zn, double d) {
    starti;
    assert(T != Q, "invalid size");
    guarantee(d == 0.0, "invalid immediate");
    // cond_op: bits 2-1 go to instruction bits 17-16, bit 0 to bit 4.
    int cond_op;
    switch(cond) {
    case EQ: cond_op = 0b100; break;
    case GT: cond_op = 0b001; break;
    case GE: cond_op = 0b000; break;
    case LT: cond_op = 0b010; break;
    case LE: cond_op = 0b011; break;
    case NE: cond_op = 0b110; break;
    default:
      ShouldNotReachHere();
    }
    f(0b01100101, 31, 24), f(T, 23, 22), f(0b0100, 21, 18),
    f((cond_op >> 1) & 0x3, 17, 16), f(0b001, 15, 13),
    pgrf(Pg, 10), rf(Zn, 5);
    f(cond_op & 0x1, 4), prf(Pd, 0);
  }
3972
  // SVE unpack vector elements
 protected:
  // Shared encoding for [su]unpk{hi,lo}: widen the chosen half of Zn's
  // elements to the destination element size T (the widened size, so B and
  // Q are invalid), sign- or zero-extending per is_unsigned.
  void _sve_xunpk(bool is_unsigned, bool is_high, FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn) {
    starti;
    assert(T != B && T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1100, 21, 18);
    f(is_unsigned ? 1 : 0, 17), f(is_high ? 1 : 0, 16),
    f(0b001110, 15, 10), rf(Zn, 5), rf(Zd, 0);
  }
3982
public:
  // Public wrappers over _sve_xunpk; T is the destination (widened) size.
#define INSN(NAME, is_unsigned, is_high)                           \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn) { \
    _sve_xunpk(is_unsigned, is_high, Zd, T, Zn);                   \
  }

  INSN(sve_uunpkhi, true, true ); // Unsigned unpack and extend half of vector - high half
  INSN(sve_uunpklo, true, false); // Unsigned unpack and extend half of vector - low half
  INSN(sve_sunpkhi, false, true ); // Signed unpack and extend half of vector - high half
  INSN(sve_sunpklo, false, false); // Signed unpack and extend half of vector - low half
#undef INSN
3994
  // SVE unpack predicate elements
  // Widen half of the byte-granular predicate Pn into Pd; op selects the
  // high (1) or low (0) half.
#define INSN(NAME, op)                                            \
  void NAME(PRegister Pd, PRegister Pn) {                         \
    starti;                                                       \
    f(0b000001010011000, 31, 17), f(op, 16), f(0b0100000, 15, 9); \
    prf(Pn, 5), f(0b0, 4), prf(Pd, 0);                            \
  }

  INSN(sve_punpkhi, 0b1); // Unpack and widen high half of predicate
  INSN(sve_punpklo, 0b0); // Unpack and widen low half of predicate
#undef INSN
4006
  // SVE permute vector elements
  // UZP1/UZP2 Zd.T, Zn.T, Zm.T -- op selects even (0) or odd (1) elements.
#define INSN(NAME, op)                                                                 \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \
    starti;                                                                            \
    assert(T != Q, "invalid size");                                                    \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);                       \
    f(0b01101, 15, 11), f(op, 10), rf(Zn, 5), rf(Zd, 0);                               \
  }

  INSN(sve_uzp1, 0b0); // Concatenate even elements from two vectors
  INSN(sve_uzp2, 0b1); // Concatenate odd elements from two vectors
#undef INSN
4019
  // SVE permute predicate elements
  // UZP1/UZP2 Pd.T, Pn.T, Pm.T -- same even/odd selection as the vector
  // form, operating on predicate registers.
#define INSN(NAME, op)                                                              \
  void NAME(PRegister Pd, SIMD_RegVariant T, PRegister Pn, PRegister Pm) {          \
    starti;                                                                         \
    assert(T != Q, "invalid size");                                                 \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b10, 21, 20), prf(Pm, 16);              \
    f(0b01001, 15, 11), f(op, 10), f(0b0, 9), prf(Pn, 5), f(0b0, 4), prf(Pd, 0);    \
  }

  INSN(sve_uzp1, 0b0); // Concatenate even elements from two predicates
  INSN(sve_uzp2, 0b1); // Concatenate odd elements from two predicates
#undef INSN
4032
  // SVE integer compare scalar count and limit
  // WHILE{LT,LE,LO,LS} Pd.T, Rn, Rm -- generate a predicate from comparing
  // an incrementing value (starting at Rn) against the limit Rm.  The sf
  // macro parameter selects 32-bit (0, "w" variants) or 64-bit (1) scalar
  // operands; op encodes signed/unsigned and lt/le.
#define INSN(NAME, sf, op)                                          \
  void NAME(PRegister Pd, SIMD_RegVariant T, Register Rn, Register Rm) { \
    starti;                                                         \
    assert(T != Q, "invalid register variant");                     \
    f(0b00100101, 31, 24), f(T, 23, 22), f(1, 21),                  \
    zrf(Rm, 16), f(0, 15, 13), f(sf, 12), f(op >> 1, 11, 10),       \
    zrf(Rn, 5), f(op & 1, 4), prf(Pd, 0);                           \
  }
  // While incrementing signed scalar less than scalar
  INSN(sve_whileltw, 0b0, 0b010);
  INSN(sve_whilelt, 0b1, 0b010);
  // While incrementing signed scalar less than or equal to scalar
  INSN(sve_whilelew, 0b0, 0b011);
  INSN(sve_whilele, 0b1, 0b011);
  // While incrementing unsigned scalar lower than scalar
  INSN(sve_whilelow, 0b0, 0b110);
  INSN(sve_whilelo, 0b1, 0b110);
  // While incrementing unsigned scalar lower than or the same as scalar
  INSN(sve_whilelsw, 0b0, 0b111);
  INSN(sve_whilels, 0b1, 0b111);
#undef INSN
4055
  // SVE predicate reverse
  // REV Pd.T, Pn.T -- reverse the order of all elements in the predicate.
  void sve_rev(PRegister Pd, SIMD_RegVariant T, PRegister Pn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1101000100000, 21, 9);
    prf(Pn, 5), f(0, 4), prf(Pd, 0);
  }
4063
  // SVE partition break condition
  // BRKA/BRKB Pd.B, Pg{/Z,/M}, Pn.B -- clear predicate elements after
  // (brka) or from (brkb) the first active element of Pn; isMerge selects
  // merging (1) vs zeroing (0) predication.
#define INSN(NAME, op)                                                           \
  void NAME(PRegister Pd, PRegister Pg, PRegister Pn, bool isMerge) {            \
    starti;                                                                      \
    f(0b00100101, 31, 24), f(op, 23, 22), f(0b01000001, 21, 14);                 \
    prf(Pg, 10), f(0b0, 9), prf(Pn, 5), f(isMerge ? 1 : 0, 4), prf(Pd, 0);       \
  }

  INSN(sve_brka, 0b00); // Break after first true condition
  INSN(sve_brkb, 0b10); // Break before first true condition
#undef INSN
4075
  // SVE move prefix (unpredicated)
  // MOVPRFX Zd, Zn -- prefix instruction providing a destructive operand
  // for the immediately following instruction.
  void sve_movprfx(FloatRegister Zd, FloatRegister Zn) {
    starti;
    f(0b00000100, 31, 24), f(0b00, 23, 22), f(0b1, 21), f(0b00000, 20, 16);
    f(0b101111, 15, 10), rf(Zn, 5), rf(Zd, 0);
  }
4082
4083 // Element count and increment scalar (SVE)
4084 #define INSN(NAME, TYPE) \
4085 void NAME(Register Xdn, unsigned imm4 = 1, int pattern = 0b11111) { \
4086 starti; \
4087 f(0b00000100, 31, 24), f(TYPE, 23, 22), f(0b10, 21, 20); \
4088 f(imm4 - 1, 19, 16), f(0b11100, 15, 11), f(0, 10), f(pattern, 9, 5), rf(Xdn, 0); \
4089 }
4090
4091 INSN(sve_cntb, B); // Set scalar to multiple of 8-bit predicate constraint element count
4092 INSN(sve_cnth, H); // Set scalar to multiple of 16-bit predicate constraint element count
4093 INSN(sve_cntw, S); // Set scalar to multiple of 32-bit predicate constraint element count
4094 INSN(sve_cntd, D); // Set scalar to multiple of 64-bit predicate constraint element count
4095 #undef INSN
4096
  // Set scalar to active predicate element count
  // CNTP Xd, Pg, Pn.T -- count the active elements of Pn under Pg into Xd.
  void sve_cntp(Register Xd, SIMD_RegVariant T, PRegister Pg, PRegister Pn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b10000010, 21, 14);
    prf(Pg, 10), f(0, 9), prf(Pn, 5), rf(Xd, 0);
  }
4104
  // SVE convert signed integer to floating-point (predicated)
  // SCVTF Zd.T_dst, Pg/M, Zn.T_src -- T_src is the integer element size,
  // T_dst the floating-point one; H source is only valid with H destination.
  void sve_scvtf(FloatRegister Zd, SIMD_RegVariant T_dst, PRegister Pg,
                 FloatRegister Zn, SIMD_RegVariant T_src) {
    starti;
    assert(T_src != B && T_dst != B && T_src != Q && T_dst != Q &&
           (T_src != H || T_dst == T_src), "invalid register variant");
    int opc = T_dst;
    int opc2 = T_src;
    // In most cases we can treat T_dst, T_src as opc, opc2,
    // except for the following two combinations.
    // +-----+------+---+------------------------------------+
    // | opc | opc2 | U | Instruction Details                |
    // +-----+------+---+------------------------------------+
    // | 11  | 00   | 0 | SCVTF - 32-bit to double-precision |
    // | 11  | 10   | 0 | SCVTF - 64-bit to single-precision |
    // +-----+------+---+------------------------------------+
    if (T_src == S && T_dst == D) {
      opc = 0b11;
      opc2 = 0b00;
    } else if (T_src == D && T_dst == S) {
      opc = 0b11;
      opc2 = 0b10;
    }
    f(0b01100101, 31, 24), f(opc, 23, 22), f(0b010, 21, 19);
    f(opc2, 18, 17), f(0b0101, 16, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }
4132
  // SVE floating-point convert to signed integer, rounding toward zero (predicated)
  // FCVTZS Zd.T_dst, Pg/M, Zn.T_src -- T_src is the floating-point element
  // size, T_dst the integer one; H destination is only valid with H source.
  void sve_fcvtzs(FloatRegister Zd, SIMD_RegVariant T_dst, PRegister Pg,
                  FloatRegister Zn, SIMD_RegVariant T_src) {
    starti;
    assert(T_src != B && T_dst != B && T_src != Q && T_dst != Q &&
           (T_dst != H || T_src == H), "invalid register variant");
    int opc = T_src;
    int opc2 = T_dst;
    // In most cases we can treat T_src, T_dst as opc, opc2,
    // except for the following two combinations.
    // +-----+------+---+-------------------------------------+
    // | opc | opc2 | U | Instruction Details                 |
    // +-----+------+---+-------------------------------------+
    // | 11  | 10   | 0 | FCVTZS - single-precision to 64-bit |
    // | 11  | 00   | 0 | FCVTZS - double-precision to 32-bit |
    // +-----+------+---+-------------------------------------+
    if (T_src == S && T_dst == D) {
      opc = 0b11;
      opc2 = 0b10;
    } else if (T_src == D && T_dst == S) {
      opc = 0b11;
      opc2 = 0b00;
    }
    f(0b01100101, 31, 24), f(opc, 23, 22), f(0b011, 21, 19);
    f(opc2, 18, 17), f(0b0101, 16, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }
4160
  // SVE floating-point convert precision (predicated)
  // FCVT Zd.T_dst, Pg/M, Zn.T_src -- convert between half, single and
  // double precision; source and destination sizes must differ.
  void sve_fcvt(FloatRegister Zd, SIMD_RegVariant T_dst, PRegister Pg,
                FloatRegister Zn, SIMD_RegVariant T_src) {
    starti;
    assert(T_src != B && T_dst != B && T_src != Q && T_dst != Q &&
           T_src != T_dst, "invalid register variant");
    // The encodings of fields op1 (bits 17-16) and op2 (bits 23-22)
    // depend on T_src and T_dst as given below -
    // +-----+------+---------------------------------------------+
    // | op2 | op1  | Instruction Details                         |
    // +-----+------+---------------------------------------------+
    // | 10  | 01   | FCVT - half-precision to single-precision   |
    // | 11  | 01   | FCVT - half-precision to double-precision   |
    // | 10  | 00   | FCVT - single-precision to half-precision   |
    // | 11  | 11   | FCVT - single-precision to double-precision |
    // | 11  | 00   | FCVT - double-precision to half-precision   |
    // | 11  | 10   | FCVT - double-precision to single-precision |
    // +-----+------+---------------------------------------------+
    int op1 = 0b00;
    int op2 = (T_src == D || T_dst == D) ? 0b11 : 0b10;
    if (T_src == H) {
      op1 = 0b01;
    } else if (T_dst == S) {
      op1 = 0b10;
    } else if (T_dst == D) {
      op1 = 0b11;
    }
    f(0b01100101, 31, 24), f(op2, 23, 22), f(0b0010, 21, 18);
    f(op1, 17, 16), f(0b101, 15, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }
4192
  // SVE extract element to general-purpose register
  // LASTA/LASTB Rd, Pg, Zn.T -- extract the element after (lasta) or at
  // (lastb) the last active element of Pg into Rd.
#define INSN(NAME, before)                                                  \
  void NAME(Register Rd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) { \
    starti;                                                                 \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b10000, 21, 17);                \
    f(before, 16), f(0b101, 15, 13);                                        \
    pgrf(Pg, 10), rf(Zn, 5), rf(Rd, 0);                                     \
  }

  INSN(sve_lasta, 0b0);
  INSN(sve_lastb, 0b1);
#undef INSN
4205
  // SVE extract element to SIMD&FP scalar register
  // Same as the general-purpose form above but the result goes to Vd.
#define INSN(NAME, before)                                                       \
  void NAME(FloatRegister Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) { \
    starti;                                                                      \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b10001, 21, 17);                     \
    f(before, 16), f(0b100, 15, 13);                                             \
    pgrf(Pg, 10), rf(Zn, 5), rf(Vd, 0);                                          \
  }

  INSN(sve_lasta, 0b0);
  INSN(sve_lastb, 0b1);
#undef INSN
4218
  // SVE reverse within elements
  // REVB reverses bytes within each element (element must be wider than a
  // byte); RBIT reverses the bits within each element.  The cond macro
  // parameter expresses the per-instruction size constraint.
#define INSN(NAME, opc, cond)                                                       \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) {  \
    starti;                                                                         \
    assert(cond, "invalid size");                                                   \
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1001, 21, 18), f(opc, 17, 16);         \
    f(0b100, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);                           \
  }

  INSN(sve_revb, 0b00, T == H || T == S || T == D);
  INSN(sve_rbit, 0b11, T != Q);
#undef INSN
4231
  // SVE Create index starting from general-purpose register and incremented by immediate
  // INDEX Zd.T, Rn, #imm -- element i gets Rn + i * imm (imm is signed 5-bit).
  void sve_index(FloatRegister Zd, SIMD_RegVariant T, Register Rn, int imm) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
    sf(imm, 20, 16), f(0b010001, 15, 10);
    rf(Rn, 5), rf(Zd, 0);
  }
4240
  // SVE create index starting from and incremented by immediate
  // INDEX Zd.T, #imm1, #imm2 -- element i gets imm1 + i * imm2 (both are
  // signed 5-bit immediates).
  void sve_index(FloatRegister Zd, SIMD_RegVariant T, int imm1, int imm2) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
    sf(imm2, 20, 16), f(0b010000, 15, 10);
    sf(imm1, 9, 5), rf(Zd, 0);
  }
4249
private:
  // Shared encoding for the one- and two-register TBL forms.  Zn is the
  // first (or only) table register; for reg_count == 2 the caller ensures
  // the second table register is Zn's successor.
  void _sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, unsigned reg_count, FloatRegister Zm) {
    starti;
    assert(T != Q, "invalid size");
    // Only supports one or two vector lookup. One vector lookup was introduced in SVE1
    // and two vector lookup in SVE2
    assert(0 < reg_count && reg_count <= 2, "invalid number of registers");

    // Bits 12-11 distinguish the single-register (0b10) and
    // two-register (0b01) encodings.
    int op11 = (reg_count == 1) ? 0b10 : 0b01;

    f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
    f(0b001, 15, 13), f(op11, 12, 11), f(0b0, 10), rf(Zn, 5), rf(Zd, 0);
  }
4263
public:
  // SVE/SVE2 Programmable table lookup in one or two vector table (zeroing)
  // Single-table form: Zd gets Zn[Zm[i]] per element, zero if out of range.
  void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
    _sve_tbl(Zd, T, Zn, 1, Zm);
  }

  // Two-table form (SVE2): {Zn1, Zn2} must be consecutive registers.
  void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn1, FloatRegister Zn2, FloatRegister Zm) {
    assert(Zn1->successor() == Zn2, "invalid order of registers");
    _sve_tbl(Zd, T, Zn1, 2, Zm);
  }
4274
  // Shuffle active elements of vector to the right and fill with zero
  // COMPACT Zd.T, Pg, Zn.T -- only word and doubleword elements exist.
  void sve_compact(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, PRegister Pg) {
    starti;
    assert(T == S || T == D, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b100001100, 21, 13);
    pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }
4282
  // SVE2 Count matching elements in vector
  // HISTCNT Zd.T, Pg/Z, Zn.T, Zm.T -- only word and doubleword elements.
  void sve_histcnt(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg,
                   FloatRegister Zn, FloatRegister Zm) {
    starti;
    assert(T == S || T == D, "invalid size");
    f(0b01000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
    f(0b110, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
  }
4291
  // SVE2 bitwise permute
  // BEXT gathers bits of Zn selected by the mask in Zm into contiguous low
  // bits; BDEP scatters contiguous low bits of Zn to the mask positions.
#define INSN(NAME, opc)                                                                \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \
    starti;                                                                            \
    assert(T != Q, "invalid size");                                                    \
    f(0b01000101, 31, 24), f(T, 23, 22), f(0b0, 21);                                   \
    rf(Zm, 16), f(0b1011, 15, 12), f(opc, 11, 10);                                     \
    rf(Zn, 5), rf(Zd, 0);                                                              \
  }

  INSN(sve_bext, 0b00);
  INSN(sve_bdep, 0b01);
#undef INSN
4305
  // SVE2 bitwise ternary operations
  // Destructive: Zdn = op(Zdn, Zm, Zk).
#define INSN(NAME, opc)                                          \
  void NAME(FloatRegister Zdn, FloatRegister Zm, FloatRegister Zk) { \
    starti;                                                      \
    f(0b00000100, 31, 24), f(opc, 23, 21), rf(Zm, 16);           \
    f(0b001110, 15, 10), rf(Zk, 5), rf(Zdn, 0);                  \
  }

  INSN(sve_eor3, 0b001); // Bitwise exclusive OR of three vectors
#undef INSN
4316
  // SVE2 saturating operations - predicate
  // Destructive, predicated: Zdn = sat_op(Zdn, Znm) for active elements.
#define INSN(NAME, op1, op2)                                                            \
  void NAME(FloatRegister Zdn, SIMD_RegVariant T, PRegister Pg, FloatRegister Znm) {    \
    assert(T != Q, "invalid register variant");                                         \
    sve_predicate_reg_insn(op1, op2, Zdn, T, Pg, Znm);                                  \
  }

  INSN(sve_sqadd, 0b01000100, 0b011000100); // signed saturating add
  INSN(sve_sqsub, 0b01000100, 0b011010100); // signed saturating sub
  INSN(sve_uqadd, 0b01000100, 0b011001100); // unsigned saturating add
  INSN(sve_uqsub, 0b01000100, 0b011011100); // unsigned saturating sub

#undef INSN
4330
  // Construct an assembler emitting into the given code buffer.  On
  // macOS/AArch64 the thread is switched to write mode first, since the
  // code cache there is write-xor-execute protected.
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
    MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
  }
4334
  // Stack overflow checking
  virtual void bang_stack_with_offset(int offset);

  // Predicates used by callers to decide whether a constant can be encoded
  // directly in the corresponding instruction form, or must instead be
  // materialized into a register.
  static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
  static bool operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm);
  static bool operand_valid_for_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_sve_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_float_immediate(double imm);
  static int operand_valid_for_movi_immediate(uint64_t imm64, SIMD_Arrangement T);
  static bool operand_valid_for_sve_dup_immediate(int64_t imm);

  // Emit a 64-bit datum with relocation information.
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
4348 };
4349
4350 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
4351 Assembler::Membar_mask_bits b) {
4352 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
4353 }
4354
// Emit the assembled 32-bit instruction word when the temporary created by
// "starti" goes out of scope at the end of an encoding function.
Instruction_aarch64::~Instruction_aarch64() {
  assem->emit_int32(insn);
  // Every bit of the instruction must have been assigned exactly once.
  assert_cond(get_bits() == 0xffffffff);
}
4359
4360 #undef f
4361 #undef sf
4362 #undef rf
4363 #undef srf
4364 #undef zrf
4365 #undef prf
4366 #undef pgrf
4367 #undef fixed
4368
4369 #undef starti
4370
4371 // Invert a condition
4372 inline Assembler::Condition operator~(const Assembler::Condition cond) {
4373 return Assembler::Condition(int(cond) ^ 1);
4374 }
4375
4376 extern "C" void das(uint64_t start, int len);
4377
4378 #endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP