// BEGIN Generated code -- do not edit
// Generated by aarch64-asmtest.py
// NOTE(review): this section is machine-generated test input for the AArch64
// assembler.  Each `__ <mnemonic>(...)` call emits one instruction; the
// trailing comment on each line is the disassembly the encoding is expected
// to match.  Do not hand-edit operands or ordering -- regenerate with
// aarch64-asmtest.py instead.  `back` is bound before the block and `forth`
// presumably after it (outside this view -- confirm in the full file), so
// branches to them exercise negative and positive displacements.
Label back, forth;
__ bind(back);

// ArithOp
// (register-register arithmetic/logical with shifted second operand)
__ add(r26, r23, r13, Assembler::LSL, 32);         // add x26, x23, x13, LSL #32
__ sub(r12, r24, r9, Assembler::LSR, 37);          // sub x12, x24, x9, LSR #37
__ adds(r28, r15, r8, Assembler::ASR, 39);         // adds x28, x15, x8, ASR #39
__ subs(r7, r28, r30, Assembler::ASR, 57);         // subs x7, x28, x30, ASR #57
__ addw(r9, r22, r27, Assembler::ASR, 15);         // add w9, w22, w27, ASR #15
__ subw(r3, r13, r17, Assembler::ASR, 30);         // sub w3, w13, w17, ASR #30
__ addsw(r14, r26, r8, Assembler::ASR, 17);        // adds w14, w26, w8, ASR #17
__ subsw(r0, r22, r12, Assembler::ASR, 21);        // subs w0, w22, w12, ASR #21
__ andr(r0, r15, r26, Assembler::LSL, 20);         // and x0, x15, x26, LSL #20
__ orr(r26, r5, r17, Assembler::LSL, 61);          // orr x26, x5, x17, LSL #61
__ eor(r24, r13, r2, Assembler::LSL, 32);          // eor x24, x13, x2, LSL #32
__ ands(r28, r3, r17, Assembler::ASR, 35);         // ands x28, x3, x17, ASR #35
__ andw(r25, r16, r29, Assembler::LSR, 18);        // and w25, w16, w29, LSR #18
__ orrw(r13, r17, r11, Assembler::LSR, 9);         // orr w13, w17, w11, LSR #9
__ eorw(r5, r5, r17, Assembler::LSR, 15);          // eor w5, w5, w17, LSR #15
__ andsw(r2, r23, r27, Assembler::ASR, 26);        // ands w2, w23, w27, ASR #26
__ bic(r27, r28, r16, Assembler::LSR, 45);         // bic x27, x28, x16, LSR #45
__ orn(r8, r25, r26, Assembler::ASR, 37);          // orn x8, x25, x26, ASR #37
__ eon(r29, r17, r13, Assembler::LSR, 63);         // eon x29, x17, x13, LSR #63
__ bics(r28, r24, r2, Assembler::LSR, 31);         // bics x28, x24, x2, LSR #31
__ bicw(r19, r26, r7, Assembler::ASR, 3);          // bic w19, w26, w7, ASR #3
__ ornw(r6, r24, r10, Assembler::ASR, 3);          // orn w6, w24, w10, ASR #3
__ eonw(r4, r21, r1, Assembler::LSR, 29);          // eon w4, w21, w1, LSR #29
__ bicsw(r16, r21, r0, Assembler::LSR, 19);        // bics w16, w21, w0, LSR #19

// AddSubImmOp
// (add/subtract with 12-bit unsigned immediate)
__ addw(r17, r12, 379u);                           // add w17, w12, #379
__ addsw(r30, r1, 22u);                            // adds w30, w1, #22
__ subw(r29, r5, 126u);                            // sub w29, w5, #126
__ subsw(r6, r24, 960u);                           // subs w6, w24, #960
__ add(r0, r13, 104u);                             // add x0, x13, #104
__ adds(r8, r6, 663u);                             // adds x8, x6, #663
__ sub(r10, r5, 516u);                             // sub x10, x5, #516
__ subs(r1, r3, 1012u);                            // subs x1, x3, #1012

// LogicalImmOp
// (logical ops with bitmask-immediate encodable patterns)
__ andw(r6, r11, 4294049777ull);                   // and w6, w11, #0xfff1fff1
__ orrw(r28, r5, 4294966791ull);                   // orr w28, w5, #0xfffffe07
__ eorw(r1, r20, 134217216ull);                    // eor w1, w20, #0x7fffe00
__ andsw(r7, r17, 1048576ull);                     // ands w7, w17, #0x100000
__ andr(r14, r12, 9223372036854775808ull);         // and x14, x12, #0x8000000000000000
__ orr(r9, r11, 562675075514368ull);               // orr x9, x11, #0x1ffc000000000
__ eor(r17, r0, 18014398509481728ull);             // eor x17, x0, #0x3fffffffffff00
__ ands(r1, r8, 18446744073705357315ull);          // ands x1, x8, #0xffffffffffc00003

// AbsOp
// (unconditional branches to self, backward, and forward labels)
__ b(__ pc());                                     // b .
__ b(back);                                        // b back
__ b(forth);                                       // b forth
__ bl(__ pc());                                    // bl .
__ bl(back);                                       // bl back
__ bl(forth);                                      // bl forth

// RegAndAbsOp
__ cbzw(r10, __ pc());                             // cbz w10, .
__ cbzw(r10, back);                                // cbz w10, back
__ cbzw(r10, forth);                               // cbz w10, forth
__ cbnzw(r8, __ pc());                             // cbnz w8, .
__ cbnzw(r8, back);                                // cbnz w8, back
__ cbnzw(r8, forth);                               // cbnz w8, forth
__ cbz(r11, __ pc());                              // cbz x11, .
__ cbz(r11, back);                                 // cbz x11, back
__ cbz(r11, forth);                                // cbz x11, forth
__ cbnz(r29, __ pc());                             // cbnz x29, .
__ cbnz(r29, back);                                // cbnz x29, back
__ cbnz(r29, forth);                               // cbnz x29, forth
__ adr(r19, __ pc());                              // adr x19, .
__ adr(r19, back);                                 // adr x19, back
__ adr(r19, forth);                                // adr x19, forth
__ _adrp(r19, __ pc());                            // adrp x19, .

// RegImmAbsOp
__ tbz(r22, 6, __ pc());                           // tbz x22, #6, .
__ tbz(r22, 6, back);                              // tbz x22, #6, back
__ tbz(r22, 6, forth);                             // tbz x22, #6, forth
__ tbnz(r12, 11, __ pc());                         // tbnz x12, #11, .
__ tbnz(r12, 11, back);                            // tbnz x12, #11, back
__ tbnz(r12, 11, forth);                           // tbnz x12, #11, forth

// MoveWideImmOp
// (movn/movz/movk with 16-bit immediate and shift in {0,16,32})
__ movnw(r0, 6301, 0);                             // movn w0, #6301, lsl 0
__ movzw(r7, 20886, 0);                            // movz w7, #20886, lsl 0
__ movkw(r27, 18617, 0);                           // movk w27, #18617, lsl 0
__ movn(r12, 22998, 16);                           // movn x12, #22998, lsl 16
__ movz(r20, 1532, 16);                            // movz x20, #1532, lsl 16
__ movk(r8, 5167, 32);                             // movk x8, #5167, lsl 32

// BitfieldOp
__ sbfm(r15, r17, 24, 28);                         // sbfm x15, x17, #24, #28
__ bfmw(r15, r9, 14, 25);                          // bfm w15, w9, #14, #25
__ ubfmw(r27, r25, 6, 31);                         // ubfm w27, w25, #6, #31
__ sbfm(r19, r2, 23, 31);                          // sbfm x19, x2, #23, #31
__ bfm(r12, r21, 10, 6);                           // bfm x12, x21, #10, #6
__ ubfm(r22, r0, 26, 16);                          // ubfm x22, x0, #26, #16

// ExtractOp
__ extrw(r3, r3, r20, 27);                         // extr w3, w3, w20, #27
__ extr(r8, r30, r3, 54);                          // extr x8, x30, x3, #54

// CondBranchOp
// (every condition code x {self, backward, forward} destinations)
__ br(Assembler::EQ, __ pc());                     // b.EQ .
__ br(Assembler::EQ, back);                        // b.EQ back
__ br(Assembler::EQ, forth);                       // b.EQ forth
__ br(Assembler::NE, __ pc());                     // b.NE .
__ br(Assembler::NE, back);                        // b.NE back
__ br(Assembler::NE, forth);                       // b.NE forth
__ br(Assembler::HS, __ pc());                     // b.HS .
__ br(Assembler::HS, back);                        // b.HS back
__ br(Assembler::HS, forth);                       // b.HS forth
__ br(Assembler::CS, __ pc());                     // b.CS .
__ br(Assembler::CS, back);                        // b.CS back
__ br(Assembler::CS, forth);                       // b.CS forth
__ br(Assembler::LO, __ pc());                     // b.LO .
__ br(Assembler::LO, back);                        // b.LO back
__ br(Assembler::LO, forth);                       // b.LO forth
__ br(Assembler::CC, __ pc());                     // b.CC .
__ br(Assembler::CC, back);                        // b.CC back
__ br(Assembler::CC, forth);                       // b.CC forth
__ br(Assembler::MI, __ pc());                     // b.MI .
__ br(Assembler::MI, back);                        // b.MI back
__ br(Assembler::MI, forth);                       // b.MI forth
__ br(Assembler::PL, __ pc());                     // b.PL .
__ br(Assembler::PL, back);                        // b.PL back
__ br(Assembler::PL, forth);                       // b.PL forth
__ br(Assembler::VS, __ pc());                     // b.VS .
__ br(Assembler::VS, back);                        // b.VS back
__ br(Assembler::VS, forth);                       // b.VS forth
__ br(Assembler::VC, __ pc());                     // b.VC .
__ br(Assembler::VC, back);                        // b.VC back
__ br(Assembler::VC, forth);                       // b.VC forth
__ br(Assembler::HI, __ pc());                     // b.HI .
__ br(Assembler::HI, back);                        // b.HI back
__ br(Assembler::HI, forth);                       // b.HI forth
__ br(Assembler::LS, __ pc());                     // b.LS .
__ br(Assembler::LS, back);                        // b.LS back
__ br(Assembler::LS, forth);                       // b.LS forth
__ br(Assembler::GE, __ pc());                     // b.GE .
__ br(Assembler::GE, back);                        // b.GE back
__ br(Assembler::GE, forth);                       // b.GE forth
__ br(Assembler::LT, __ pc());                     // b.LT .
__ br(Assembler::LT, back);                        // b.LT back
__ br(Assembler::LT, forth);                       // b.LT forth
__ br(Assembler::GT, __ pc());                     // b.GT .
__ br(Assembler::GT, back);                        // b.GT back
__ br(Assembler::GT, forth);                       // b.GT forth
__ br(Assembler::LE, __ pc());                     // b.LE .
__ br(Assembler::LE, back);                        // b.LE back
__ br(Assembler::LE, forth);                       // b.LE forth
__ br(Assembler::AL, __ pc());                     // b.AL .
__ br(Assembler::AL, back);                        // b.AL back
__ br(Assembler::AL, forth);                       // b.AL forth
__ br(Assembler::NV, __ pc());                     // b.NV .
__ br(Assembler::NV, back);                        // b.NV back
__ br(Assembler::NV, forth);                       // b.NV forth

// ImmOp
// (exception-generating instructions with 16-bit immediate)
__ svc(12999);                                     // svc #12999
__ hvc(2665);                                      // hvc #2665
__ smc(9002);                                      // smc #9002
__ brk(14843);                                     // brk #14843
__ hlt(25964);                                     // hlt #25964

// Op
// (no-operand system/hint instructions, incl. pointer-auth hints)
__ nop();                                          // nop
__ yield();                                        // yield
__ wfe();                                          // wfe
__ sev();                                          // sev
__ sevl();                                         // sevl
__ autia1716();                                    // autia1716
__ autiasp();                                      // autiasp
__ autiaz();                                       // autiaz
__ autib1716();                                    // autib1716
__ autibsp();                                      // autibsp
__ autibz();                                       // autibz
__ pacia1716();                                    // pacia1716
__ paciasp();                                      // paciasp
__ paciaz();                                       // paciaz
__ pacib1716();                                    // pacib1716
__ pacibsp();                                      // pacibsp
__ pacibz();                                       // pacibz
__ eret();                                         // eret
__ drps();                                         // drps
__ isb();                                          // isb

// PostfixExceptionOp
__ wfi();                                          // wfi
__ xpaclri();                                      // xpaclri

// SystemOp
__ dsb(Assembler::ST);                             // dsb ST
__ dmb(Assembler::OSHST);                          // dmb OSHST

// OneRegOp
__ br(r16);                                        // br x16
__ blr(r20);                                       // blr x20
__ paciza(r10);                                    // paciza x10
__ pacizb(r27);                                    // pacizb x27
__ pacdza(r8);                                     // pacdza x8
__ pacdzb(r0);                                     // pacdzb x0
__ autiza(r1);                                     // autiza x1
__ autizb(r21);                                    // autizb x21
__ autdza(r17);                                    // autdza x17
__ autdzb(r29);                                    // autdzb x29
__ xpacd(r29);                                     // xpacd x29
__ braaz(r28);                                     // braaz x28
__ brabz(r1);                                      // brabz x1
__ blraaz(r23);                                    // blraaz x23
__ blrabz(r21);                                    // blrabz x21

// SystemOneRegOp
// (msr/mrs take the raw op1/CRn/CRm/op2 encoding of the system register)
__ msr(3, 4, 4, 1, r20);                           // msr fpsr, x20

// SystemOneRegOp
__ msr(3, 4, 2, 0, r22);                           // msr nzcv, x22

// OneRegSystemOp
__ mrs(3, 4, 4, 1, r27);                           // mrs x27, fpsr

// OneRegSystemOp
__ mrs(3, 4, 2, 0, r19);                           // mrs x19, nzcv

// OneRegSystemOp
__ mrs(3, 0, 0, 7, r11);                           // mrs x11, dczid_el0

// OneRegSystemOp
__ mrs(3, 0, 0, 1, r16);                           // mrs x16, ctr_el0

// PostfixExceptionOneRegOp
__ xpaci(r6);                                      // xpaci x6

// LoadStoreExclusiveOp
// (64-bit exclusive/acquire-release accesses)
__ stxr(r17, r0, r4);                              // stxr w17, x0, [x4]
__ stlxr(r10, r24, r22);                           // stlxr w10, x24, [x22]
__ ldxr(r10, r19);                                 // ldxr x10, [x19]
__ ldaxr(r1, r5);                                  // ldaxr x1, [x5]
__ stlr(r30, r8);                                  // stlr x30, [x8]
__ ldar(r12, r17);                                 // ldar x12, [x17]

// LoadStoreExclusiveOp
__ stxrw(r9, r14, r7);                             // stxr w9, w14, [x7]
__ stlxrw(r1, r5, r16);                            // stlxr w1, w5, [x16]
__ ldxrw(r2, r12);                                 // ldxr w2, [x12]
__ ldaxrw(r10, r12);                               // ldaxr w10, [x12]
__ stlrw(r3, r28);                                 // stlr w3, [x28]
__ ldarw(r14, r26);                                // ldar w14, [x26]

// LoadStoreExclusiveOp
__ stxrh(r30, r10, r14);                           // stxrh w30, w10, [x14]
__ stlxrh(r21, r13, r9);                           // stlxrh w21, w13, [x9]
__ ldxrh(r22, r27);                                // ldxrh w22, [x27]
__ ldaxrh(r28, r19);                               // ldaxrh w28, [x19]
__ stlrh(r11, r30);                                // stlrh w11, [x30]
__ ldarh(r19, r2);                                 // ldarh w19, [x2]

// LoadStoreExclusiveOp
__ stxrb(r2, r23, r1);                             // stxrb w2, w23, [x1]
__ stlxrb(r0, r12, r16);                           // stlxrb w0, w12, [x16]
__ ldxrb(r13, r15);                                // ldxrb w13, [x15]
__ ldaxrb(r17, r21);                               // ldaxrb w17, [x21]
__ stlrb(r13, r11);                                // stlrb w13, [x11]
__ ldarb(r30, r8);                                 // ldarb w30, [x8]

// LoadStoreExclusiveOp
__ ldxp(r24, r13, r11);                            // ldxp x24, x13, [x11]
__ ldaxp(r1, r26, r21);                            // ldaxp x1, x26, [x21]
__ stxp(r27, r13, r20, r3);                        // stxp w27, x13, x20, [x3]
__ stlxp(r12, r6, r1, r29);                        // stlxp w12, x6, x1, [x29]

// LoadStoreExclusiveOp
__ ldxpw(r6, r4, r11);                             // ldxp w6, w4, [x11]
__ ldaxpw(r16, r4, r30);                           // ldaxp w16, w4, [x30]
__ stxpw(r30, r4, r12, r21);                       // stxp w30, w4, w12, [x21]
__ stlxpw(r27, r15, r28, r9);                      // stlxp w27, w15, w28, [x9]

// base_plus_unscaled_offset
// LoadStoreOp
__ str(r25, Address(r15, 1));                      // str x25, [x15, 1]
__ strw(r2, Address(r1, -79));                     // str w2, [x1, -79]
__ strb(r20, Address(r26, -22));                   // strb w20, [x26, -22]
__ strh(r23, Address(r30, 22));                    // strh w23, [x30, 22]
__ ldr(r26, Address(r28, -49));                    // ldr x26, [x28, -49]
__ ldrw(r9, Address(r24, -128));                   // ldr w9, [x24, -128]
__ ldrb(r12, Address(r12, -30));                   // ldrb w12, [x12, -30]
__ ldrh(r1, Address(r15, 5));                      // ldrh w1, [x15, 5]
__ ldrsb(r24, Address(r14, -31));                  // ldrsb x24, [x14, -31]
__ ldrsh(r24, Address(r15, -6));                   // ldrsh x24, [x15, -6]
__ ldrshw(r5, Address(r3, 12));                    // ldrsh w5, [x3, 12]
__ ldrsw(r27, Address(r24, 17));                   // ldrsw x27, [x24, 17]
__ ldrd(v13, Address(r29, -35));                   // ldr d13, [x29, -35]
__ ldrs(v23, Address(r9, -47));                    // ldr s23, [x9, -47]
__ strd(v11, Address(r0, 9));                      // str d11, [x0, 9]
__ strs(v21, Address(r0, -127));                   // str s21, [x0, -127]

// pre
// LoadStoreOp
__ str(r29, Address(__ pre(r3, -114)));            // str x29, [x3, -114]!
__ strw(r17, Address(__ pre(r4, -72)));            // str w17, [x4, -72]!
__ strb(r0, Address(__ pre(r2, -17)));             // strb w0, [x2, -17]!
__ strh(r29, Address(__ pre(r1, 7)));              // strh w29, [x1, 7]!
__ ldr(r16, Address(__ pre(r21, -133)));           // ldr x16, [x21, -133]!
__ ldrw(r20, Address(__ pre(r14, 19)));            // ldr w20, [x14, 19]!
__ ldrb(r22, Address(__ pre(r14, -3)));            // ldrb w22, [x14, -3]!
__ ldrh(r15, Address(__ pre(r17, 9)));             // ldrh w15, [x17, 9]!
__ ldrsb(r10, Address(__ pre(r15, -19)));          // ldrsb x10, [x15, -19]!
__ ldrsh(r20, Address(__ pre(r12, -25)));          // ldrsh x20, [x12, -25]!
__ ldrshw(r21, Address(__ pre(r10, -29)));         // ldrsh w21, [x10, -29]!
__ ldrsw(r19, Address(__ pre(r0, 5)));             // ldrsw x19, [x0, 5]!
__ ldrd(v0, Address(__ pre(r14, -54)));            // ldr d0, [x14, -54]!
__ ldrs(v3, Address(__ pre(r1, 40)));              // ldr s3, [x1, 40]!
__ strd(v4, Address(__ pre(r14, -94)));            // str d4, [x14, -94]!
__ strs(v18, Address(__ pre(r28, -54)));           // str s18, [x28, -54]!

// post
// LoadStoreOp
__ str(r22, Address(__ post(r15, -185)));          // str x22, [x15], -185
__ strw(r17, Address(__ post(r14, -7)));           // str w17, [x14], -7
__ strb(r30, Address(__ post(r11, -25)));          // strb w30, [x11], -25
__ strh(r1, Address(__ post(r11, 20)));            // strh w1, [x11], 20
__ ldr(r22, Address(__ post(r1, 2)));              // ldr x22, [x1], 2
__ ldrw(r2, Address(__ post(r23, -119)));          // ldr w2, [x23], -119
__ ldrb(r3, Address(__ post(r27, -12)));           // ldrb w3, [x27], -12
__ ldrh(r16, Address(__ post(r7, -37)));           // ldrh w16, [x7], -37
__ ldrsb(r15, Address(__ post(r26, 3)));           // ldrsb x15, [x26], 3
__ ldrsh(r7, Address(__ post(r15, -30)));          // ldrsh x7, [x15], -30
__ ldrshw(r3, Address(__ post(r11, -48)));         // ldrsh w3, [x11], -48
__ ldrsw(r25, Address(__ post(r23, 22)));          // ldrsw x25, [x23], 22
__ ldrd(v0, Address(__ post(r10, -215)));          // ldr d0, [x10], -215
__ ldrs(v19, Address(__ post(r6, 55)));            // ldr s19, [x6], 55
__ strd(v14, Address(__ post(r21, -234)));         // str d14, [x21], -234
__ strs(v0, Address(__ post(r22, -70)));           // str s0, [x22], -70

// base_plus_reg
// LoadStoreOp
__ str(r27, Address(r19, r0, Address::sxtx(0)));   // str x27, [x19, x0, sxtx #0]
__ strw(r8, Address(r6, r13, Address::lsl(0)));    // str w8, [x6, x13, lsl #0]
__ strb(r4, Address(r16, r22, Address::lsl(0)));   // strb w4, [x16, x22, lsl #0]
__ strh(r25, Address(r26, r15, Address::uxtw(0))); // strh w25, [x26, w15, uxtw #0]
__ ldr(r4, Address(r5, r24, Address::sxtw(0)));    // ldr x4, [x5, w24, sxtw #0]
__ ldrw(r4, Address(r17, r7, Address::uxtw(0)));   // ldr w4, [x17, w7, uxtw #0]
__ ldrb(r17, Address(r7, r11, Address::lsl(0)));   // ldrb w17, [x7, x11, lsl #0]
__ ldrh(r0, Address(r30, r23, Address::lsl(0)));   // ldrh w0, [x30, x23, lsl #0]
__ ldrsb(r10, Address(r22, r1, Address::uxtw(0))); // ldrsb x10, [x22, w1, uxtw #0]
__ ldrsh(r21, Address(r30, r30, Address::sxtw(1))); // ldrsh x21, [x30, w30, sxtw #1]
__ ldrshw(r11, Address(r10, r28, Address::sxtw(1))); // ldrsh w11, [x10, w28, sxtw #1]
__ ldrsw(r28, Address(r19, r10, Address::uxtw(0))); // ldrsw x28, [x19, w10, uxtw #0]
__ ldrd(v30, Address(r29, r14, Address::sxtw(0))); // ldr d30, [x29, w14, sxtw #0]
__ ldrs(v8, Address(r5, r5, Address::sxtw(2)));    // ldr s8, [x5, w5, sxtw #2]
__ strd(v25, Address(r8, r13, Address::sxtx(0)));  // str d25, [x8, x13, sxtx #0]
__ strs(v17, Address(r24, r26, Address::lsl(2)));  // str s17, [x24, x26, lsl #2]

// base_plus_scaled_offset
// LoadStoreOp
__ str(r19, Address(r12, 15904));                  // str x19, [x12, 15904]
__ strw(r23, Address(r15, 7892));                  // str w23, [x15, 7892]
__ strb(r29, Address(r13, 1970));                  // strb w29, [x13, 1970]
__ strh(r11, Address(r7, 3094));                   // strh w11, [x7, 3094]
__ ldr(r10, Address(r24, 14992));                  // ldr x10, [x24, 14992]
__ ldrw(r16, Address(r0, 6160));                   // ldr w16, [x0, 6160]
__ ldrb(r20, Address(r1, 2032));                   // ldrb w20, [x1, 2032]
__ ldrh(r1, Address(r17, 4056));                   // ldrh w1, [x17, 4056]
__ ldrsb(r17, Address(r25, 1889));                 // ldrsb x17, [x25, 1889]
__ ldrsh(r27, Address(r25, 3964));                 // ldrsh x27, [x25, 3964]
__ ldrshw(r14, Address(r17, 3724));                // ldrsh w14, [x17, 3724]
__ ldrsw(r10, Address(r7, 6372));                  // ldrsw x10, [x7, 6372]
__ ldrd(v3, Address(r25, 12392));                  // ldr d3, [x25, 12392]
__ ldrs(v12, Address(r9, 7840));                   // ldr s12, [x9, 7840]
__ strd(v24, Address(r1, 12728));                  // str d24, [x1, 12728]
__ strs(v3, Address(r20, 6924));                   // str s3, [x20, 6924]

// pcrel
// LoadStoreOp
__ ldr(r2, back);                                  // ldr x2, back
__ ldrw(r29, __ pc());                             // ldr w29, .

// LoadStoreOp
__ prfm(Address(r14, 93));                         // prfm PLDL1KEEP, [x14, 93]

// LoadStoreOp
__ prfm(back);                                     // prfm PLDL1KEEP, back

// LoadStoreOp
__ prfm(Address(r1, r7, Address::lsl(3)));         // prfm PLDL1KEEP, [x1, x7, lsl #3]

// LoadStoreOp
__ prfm(Address(r17, 12288));                      // prfm PLDL1KEEP, [x17, 12288]

// AddSubCarryOp
__ adcw(r1, r24, r3);                              // adc w1, w24, w3
__ adcsw(r17, r24, r20);                           // adcs w17, w24, w20
__ sbcw(r11, r0, r13);                             // sbc w11, w0, w13
__ sbcsw(r28, r10, r7);                            // sbcs w28, w10, w7
__ adc(r4, r15, r16);                              // adc x4, x15, x16
__ adcs(r2, r12, r20);                             // adcs x2, x12, x20
__ sbc(r29, r13, r13);                             // sbc x29, x13, x13
__ sbcs(r14, r6, r12);                             // sbcs x14, x6, x12

// AddSubExtendedOp
__ addw(r20, r12, r17, ext::sxtx, 4);              // add w20, w12, w17, sxtx #4
__ addsw(r27, r11, r0, ext::uxtx, 3);              // adds w27, w11, w0, uxtx #3
__ sub(r7, r1, r9, ext::sxtx, 4);                  // sub x7, x1, x9, sxtx #4
__ subsw(r3, r27, r1, ext::uxtb, 3);               // subs w3, w27, w1, uxtb #3
__ add(r13, r26, r12, ext::sxth, 4);               // add x13, x26, x12, sxth #4
__ adds(r17, r5, r10, ext::sxtb, 2);               // adds x17, x5, x10, sxtb #2
__ sub(r30, r8, r15, ext::uxtw, 4);                // sub x30, x8, x15, uxtw #4
__ subs(r19, r23, r19, ext::uxth, 4);              // subs x19, x23, x19, uxth #4

// ConditionalCompareOp
__ ccmnw(r29, r5, 10u, Assembler::LO);             // ccmn w29, w5, #10, LO
__ ccmpw(r9, r13, 11u, Assembler::LO);             // ccmp w9, w13, #11, LO
__ ccmn(r10, r4, 6u, Assembler::HS);               // ccmn x10, x4, #6, HS
__ ccmp(r12, r2, 12u, Assembler::HI);              // ccmp x12, x2, #12, HI

// ConditionalCompareImmedOp
__ ccmnw(r16, 6, 2, Assembler::VS);                // ccmn w16, #6, #2, VS
__ ccmpw(r7, 11, 13, Assembler::VS);               // ccmp w7, #11, #13, VS
__ ccmn(r27, 10, 11, Assembler::LS);               // ccmn x27, #10, #11, LS
__ ccmp(r3, 13, 13, Assembler::LE);                // ccmp x3, #13, #13, LE

// ConditionalSelectOp
__ cselw(r26, r27, r10, Assembler::VS);            // csel w26, w27, w10, VS
__ csincw(r10, r21, r28, Assembler::LE);           // csinc w10, w21, w28, LE
__ csinvw(r23, r9, r27, Assembler::LE);            // csinv w23, w9, w27, LE
__ csnegw(r10, r29, r15, Assembler::LE);           // csneg w10, w29, w15, LE
__ csel(r30, r25, r21, Assembler::HS);             // csel x30, x25, x21, HS
__ csinc(r0, r17, r21, Assembler::GT);             // csinc x0, x17, x21, GT
__ csinv(r16, r21, r20, Assembler::CS);            // csinv x16, x21, x20, CS
__ csneg(r19, r30, r3, Assembler::LS);             // csneg x19, x30, x3, LS

// TwoRegOp
__ rbitw(r19, r11);                                // rbit w19, w11
__ rev16w(r24, r0);                                // rev16 w24, w0
__ revw(r27, r25);                                 // rev w27, w25
__ clzw(r14, r3);                                  // clz w14, w3
__ clsw(r14, r17);                                 // cls w14, w17
__ rbit(r7, r15);                                  // rbit x7, x15
__ rev16(r24, r28);                                // rev16 x24, x28
__ rev32(r17, r25);                                // rev32 x17, x25
__ rev(r2, r26);                                   // rev x2, x26
__ clz(r28, r5);                                   // clz x28, x5
__ cls(r25, r26);                                  // cls x25, x26
__ pacia(r27, r16);                                // pacia x27, x16
__ pacib(r17, r6);                                 // pacib x17, x6
__ pacda(r21, r12);                                // pacda x21, x12
__ pacdb(r0, r4);                                  // pacdb x0, x4
__ autia(r12, r27);                                // autia x12, x27
__ autib(r17, r28);                                // autib x17, x28
__ autda(r28, r2);                                 // autda x28, x2
__ autdb(r17, r10);                                // autdb x17, x10
__ braa(r15, r14);                                 // braa x15, x14
__ brab(r14, r3);                                  // brab x14, x3
__ blraa(r25, r15);                                // blraa x25, x15
__ blrab(r19, r14);                                // blrab x19, x14

// ThreeRegOp
__ udivw(r5, r16, r4);                             // udiv w5, w16, w4
__ sdivw(r26, r25, r4);                            // sdiv w26, w25, w4
__ lslvw(r2, r2, r12);                             // lslv w2, w2, w12
__ lsrvw(r29, r17, r8);                            // lsrv w29, w17, w8
__ asrvw(r7, r3, r4);                              // asrv w7, w3, w4
__ rorvw(r25, r4, r26);                            // rorv w25, w4, w26
__ udiv(r25, r4, r17);                             // udiv x25, x4, x17
__ sdiv(r0, r26, r17);                             // sdiv x0, x26, x17
__ lslv(r23, r15, r21);                            // lslv x23, x15, x21
__ lsrv(r28, r17, r27);                            // lsrv x28, x17, x27
__ asrv(r10, r3, r0);                              // asrv x10, x3, x0
__ rorv(r7, r25, r9);                              // rorv x7, x25, x9
__ umulh(r6, r15, r29);                            // umulh x6, x15, x29
__ smulh(r15, r10, r2);                            // smulh x15, x10, x2

// FourRegMulOp
__ maddw(r17, r7, r11, r11);                       // madd w17, w7, w11, w11
__ msubw(r23, r7, r29, r23);                       // msub w23, w7, w29, w23
__ madd(r14, r27, r11, r11);                       // madd x14, x27, x11, x11
__ msub(r4, r24, r12, r15);                        // msub x4, x24, x12, x15
__ smaddl(r14, r20, r11, r28);                     // smaddl x14, w20, w11, x28
__ smsubl(r13, r11, r12, r23);                     // smsubl x13, w11, w12, x23
__ umaddl(r30, r26, r14, r9);                      // umaddl x30, w26, w14, x9
__ umsubl(r13, r10, r7, r5);                       // umsubl x13, w10, w7, x5

// ThreeRegFloatOp
// (half, single, and double precision scalar FP arithmetic)
__ fabdh(v30, v15, v3);                            // fabd h30, h15, h3
__ fmulh(v12, v12, v16);                           // fmul h12, h12, h16
__ fdivh(v31, v31, v18);                           // fdiv h31, h31, h18
__ faddh(v19, v21, v16);                           // fadd h19, h21, h16
__ fsubh(v15, v10, v21);                           // fsub h15, h10, h21
__ fmaxh(v2, v10, v28);                            // fmax h2, h10, h28
__ fminh(v7, v30, v31);                            // fmin h7, h30, h31
__ fnmulh(v18, v1, v2);                            // fnmul h18, h1, h2
__ fabds(v6, v10, v3);                             // fabd s6, s10, s3
__ fmuls(v25, v11, v7);                            // fmul s25, s11, s7
__ fdivs(v1, v12, v0);                             // fdiv s1, s12, s0
__ fadds(v3, v19, v29);                            // fadd s3, s19, s29
__ fsubs(v6, v23, v6);                             // fsub s6, s23, s6
__ fmaxs(v0, v28, v27);                            // fmax s0, s28, s27
__ fmins(v2, v5, v7);                              // fmin s2, s5, s7
__ fnmuls(v29, v12, v25);                          // fnmul s29, s12, s25
__ fabdd(v13, v12, v24);                           // fabd d13, d12, d24
__ fmuld(v19, v8, v18);                            // fmul d19, d8, d18
__ fdivd(v22, v26, v21);                           // fdiv d22, d26, d21
__ faddd(v20, v19, v2);                            // fadd d20, d19, d2
__ fsubd(v30, v22, v8);                            // fsub d30, d22, d8
__ fmaxd(v22, v19, v21);                           // fmax d22, d19, d21
__ fmind(v12, v18, v21);                           // fmin d12, d18, d21
__ fnmuld(v6, v16, v3);                            // fnmul d6, d16, d3

// FourRegFloatOp
__ fmaddh(v3, v29, v3, v28);                       // fmadd h3, h29, h3, h28
__ fmadds(v15, v14, v10, v13);                     // fmadd s15, s14, s10, s13
__ fmsubs(v12, v18, v10, v26);                     // fmsub s12, s18, s10, s26
__ fnmadds(v7, v7, v15, v29);                      // fnmadd s7, s7, s15, s29
__ fnmadds(v0, v23, v0, v12);                      // fnmadd s0, s23, s0, s12
__ fmaddd(v24, v14, v13, v8);                      // fmadd d24, d14, d13, d8
__ fmsubd(v15, v7, v9, v20);                       // fmsub d15, d7, d9, d20
__ fnmaddd(v19, v29, v31, v16);                    // fnmadd d19, d29, d31, d16
__ fnmaddd(v2, v9, v16, v21);                      // fnmadd d2, d9, d16, d21

// TwoRegFloatOp
__ fmovs(v30, v4);                                 // fmov s30, s4
__ fabss(v1, v27);                                 // fabs s1, s27
__ fnegs(v25, v24);                                // fneg s25, s24
__ fsqrts(v14, v21);                               // fsqrt s14, s21
__ fcvts(v13, v6);                                 // fcvt d13, s6
__ fcvtsh(v12, v25);                               // fcvt h12, s25
__ fcvths(v25, v30);                               // fcvt s25, h30
__ fmovd(v28, v21);                                // fmov d28, d21
__ fabsd(v16, v23);                                // fabs d16, d23
__ fnegd(v5, v29);                                 // fneg d5, d29
__ fsqrtd(v22, v19);                               // fsqrt d22, d19
__ fcvtd(v13, v20);                                // fcvt s13, d20
__ fsqrth(v19, v28);                               // fsqrt h19, h28

// FloatConvertOp
__ fcvtzsw(r17, v6);                               // fcvtzs w17, s6
__ fcvtzs(r13, v7);                                // fcvtzs x13, s7
__ fcvtzdw(r28, v26);                              // fcvtzs w28, d26
__ fcvtzd(r17, v6);                                // fcvtzs x17, d6
__ scvtfws(v1, r4);                                // scvtf s1, w4
__ scvtfs(v14, r20);                               // scvtf s14, x20
__ scvtfwd(v7, r21);                               // scvtf d7, w21
__ scvtfd(v27, r23);                               // scvtf d27, x23
__ fcvtassw(r13, v20);                             // fcvtas w13, s20
__ fcvtasd(r30, v28);                              // fcvtas x30, d28
__ fcvtmssw(r10, v21);                             // fcvtms w10, s21
__ fcvtmsd(r5, v17);                               // fcvtms x5, d17
__ fmovs(r11, v14);                                // fmov w11, s14
__ fmovd(r13, v21);                                // fmov x13, d21
__ fmovs(v27, r14);                                // fmov s27, w14
__ fmovd(v4, r23);                                 // fmov d4, x23

// TwoRegFloatOp
__ fcmps(v24, v30);                                // fcmp s24, s30
__ fcmpd(v12, v14);                                // fcmp d12, d14
__ fcmps(v17, 0.0);                                // fcmp s17, #0.0
__ fcmpd(v28, 0.0);                                // fcmp d28, #0.0

// LoadStorePairOp
__ stpw(r0, r6, Address(r26, 16));                 // stp w0, w6, [x26, #16]
__ ldpw(r0, r30, Address(r6, -32));                // ldp w0, w30, [x6, #-32]
__ ldpsw(r16, r2, Address(r11, -208));             // ldpsw x16, x2, [x11, #-208]
__ stp(r15, r0, Address(r12, 128));                // stp x15, x0, [x12, #128]
__ ldp(r7, r30, Address(r23, 32));                 // ldp x7, x30, [x23, #32]

// LoadStorePairOp
__ stpw(r26, r15, Address(__ pre(r7, -256)));      // stp w26, w15, [x7, #-256]!
__ ldpw(r11, r15, Address(__ pre(r10, -32)));      // ldp w11, w15, [x10, #-32]!
__ ldpsw(r19, r16, Address(__ pre(r1, 64)));       // ldpsw x19, x16, [x1, #64]!
__ stp(r14, r9, Address(__ pre(r0, 128)));         // stp x14, x9, [x0, #128]!
__ ldp(r27, r3, Address(__ pre(r12, -96)));        // ldp x27, x3, [x12, #-96]!

// LoadStorePairOp
__ stpw(r8, r11, Address(__ post(r12, -256)));     // stp w8, w11, [x12], #-256
__ ldpw(r10, r16, Address(__ post(r4, 64)));       // ldp w10, w16, [x4], #64
__ ldpsw(r10, r30, Address(__ post(r19, -64)));    // ldpsw x10, x30, [x19], #-64
__ stp(r24, r2, Address(__ post(r15, -96)));       // stp x24, x2, [x15], #-96
__ ldp(r24, r10, Address(__ post(r16, 80)));       // ldp x24, x10, [x16], #80

// LoadStorePairOp
__ stnpw(r30, r21, Address(r29, 16));              // stnp w30, w21, [x29, #16]
__ ldnpw(r8, r30, Address(r10, -112));             // ldnp w8, w30, [x10, #-112]
__ stnp(r30, r26, Address(r6, -128));              // stnp x30, x26, [x6, #-128]
__ ldnp(r24, r2, Address(r20, 64));                // ldnp x24, x2, [x20, #64]

// LdStNEONOp
// (structured NEON loads: 1-4 registers, plain/post-indexed, plus ldNr replicate forms)
__ ld1(v31, __ T8B, Address(r25));                 // ld1 {v31.8B}, [x25]
__ ld1(v5, v6, __ T16B, Address(__ post(r15, 32))); // ld1 {v5.16B, v6.16B}, [x15], 32
__ ld1(v10, v11, v12, __ T1D, Address(__ post(r7, r13))); // ld1 {v10.1D, v11.1D, v12.1D}, [x7], x13
__ ld1(v13, v14, v15, v16, __ T8H, Address(__ post(r16, 64))); // ld1 {v13.8H, v14.8H, v15.8H, v16.8H}, [x16], 64
__ ld1r(v7, __ T8B, Address(r17));                 // ld1r {v7.8B}, [x17]
__ ld1r(v16, __ T4S, Address(__ post(r25, 4)));    // ld1r {v16.4S}, [x25], 4
__ ld1r(v11, __ T1D, Address(__ post(r3, r7)));    // ld1r {v11.1D}, [x3], x7
__ ld2(v13, v14, __ T2D, Address(r7));             // ld2 {v13.2D, v14.2D}, [x7]
__ ld2(v9, v10, __ T4H, Address(__ post(r27, 16))); // ld2 {v9.4H, v10.4H}, [x27], 16
__ ld2r(v6, v7, __ T16B, Address(r26));            // ld2r {v6.16B, v7.16B}, [x26]
__ ld2r(v23, v24, __ T2S, Address(__ post(r16, 8))); // ld2r {v23.2S, v24.2S}, [x16], 8
__ ld2r(v6, v7, __ T2D, Address(__ post(r13, r8))); // ld2r {v6.2D, v7.2D}, [x13], x8
__ ld3(v20, v21, v22, __ T4S, Address(__ post(r1, r26))); // ld3 {v20.4S, v21.4S, v22.4S}, [x1], x26
__ ld3(v15, v16, v17, __ T2S, Address(r15));       // ld3 {v15.2S, v16.2S, v17.2S}, [x15]
__ ld3r(v29, v30, v31, __ T8H, Address(r22));      // ld3r {v29.8H, v30.8H, v31.8H}, [x22]
__ ld3r(v6, v7, v8, __ T4S, Address(__ post(r10, 12))); // ld3r {v6.4S, v7.4S, v8.4S}, [x10], 12
__ ld3r(v15, v16, v17, __ T1D, Address(__ post(r6, r15))); // ld3r {v15.1D, v16.1D, v17.1D}, [x6], x15
__ ld4(v6, v7, v8, v9, __ T8H, Address(__ post(r10, 64))); // ld4 {v6.8H, v7.8H, v8.8H, v9.8H}, [x10], 64
__ ld4(v11, v12, v13, v14, __ T8B, Address(__ post(r3, r7))); // ld4 {v11.8B, v12.8B, v13.8B, v14.8B}, [x3], x7
__ ld4r(v12, v13, v14, v15, __ T8B, Address(r25)); // ld4r {v12.8B, v13.8B, v14.8B, v15.8B}, [x25]
__ ld4r(v11, v12, v13, v14, __ T4H, Address(__ post(r15, 8))); // ld4r {v11.4H, v12.4H, v13.4H, v14.4H}, [x15], 8
__ ld4r(v30, v31, v0, v1, __ T2S, Address(__ post(r6, r28))); // ld4r {v30.2S, v31.2S, v0.2S, v1.2S}, [x6], x28

// NEONReduceInstruction
__ addv(v27, __ T8B, v28);                         // addv b27, v28.8B
__ addv(v28, __ T16B, v29);                        // addv b28, v29.16B
__ addv(v1, __ T4H, v2);                           // addv h1, v2.4H
__ addv(v28, __ T8H, v29);                         // addv h28, v29.8H
__ addv(v1, __ T4S, v2);                           // addv s1, v2.4S
__ smaxv(v20, __ T8B, v21);                        // smaxv b20, v21.8B
__ smaxv(v29, __ T16B, v30);                       // smaxv b29, v30.16B
__ smaxv(v16, __ T4H, v17);                        // smaxv h16, v17.4H
__ smaxv(v13, __ T8H, v14);                        // smaxv h13, v14.8H
__ smaxv(v10, __ T4S, v11);                        // smaxv s10, v11.4S
__ fmaxv(v29, __ T4S, v30);                        // fmaxv s29, v30.4S
__ sminv(v29, __ T8B, v30);                        // sminv b29, v30.8B
__ uminv(v19, __ T8B, v20);                        // uminv b19, v20.8B
__ sminv(v22, __ T16B, v23);                       // sminv b22, v23.16B
__ uminv(v10, __ T16B, v11);                       // uminv b10, v11.16B
__ sminv(v4, __ T4H, v5);                          // sminv h4, v5.4H
__ uminv(v31, __ T4H, v0);                         // uminv h31, v0.4H
__ sminv(v21, __ T8H, v22);                        // sminv h21, v22.8H
__ uminv(v8, __ T8H, v9);                          // uminv h8, v9.8H
__ sminv(v31, __ T4S, v0);                         // sminv s31, v0.4S
__ uminv(v19, __ T4S, v20);                        // uminv s19, v20.4S
__ fminv(v10, __ T4S, v11);                        // fminv s10, v11.4S
__ fmaxp(v28, v29, __ S);                          // fmaxp s28, v29.2S
__ fmaxp(v2, v3, __ D);                            // fmaxp d2, v3.2D
__ fminp(v25, v26, __ S);                          // fminp s25, v26.2S
__ fminp(v5, v6, __ D);                            // fminp d5, v6.2D

// NEONFloatCompareWithZero
__ fcm(Assembler::GT, v3, __ T2S, v4);             // fcmgt v3.2S, v4.2S, #0.0
__ fcm(Assembler::GT, v8, __ T4S, v9);             // fcmgt v8.4S, v9.4S, #0.0
__ fcm(Assembler::GT, v22, __ T2D, v23);           // fcmgt v22.2D, v23.2D, #0.0
__ fcm(Assembler::GE, v19, __ T2S, v20);           // fcmge v19.2S, v20.2S, #0.0
__ fcm(Assembler::GE, v13, __ T4S, v14);           // fcmge v13.4S, v14.4S, #0.0
__ fcm(Assembler::GE, v5, __ T2D, v6);             // fcmge v5.2D, v6.2D, #0.0
__ fcm(Assembler::EQ, v29, __ T2S, v30);           // fcmeq v29.2S, v30.2S, #0.0
__ fcm(Assembler::EQ, v24, __ T4S, v25);           // fcmeq v24.4S, v25.4S, #0.0
__ fcm(Assembler::EQ, v21, __ T2D, v22);           // fcmeq v21.2D, v22.2D, #0.0
__ fcm(Assembler::LT, v26, __ T2S, v27);           // fcmlt v26.2S, v27.2S, #0.0
__ fcm(Assembler::LT, v24, __ T4S, v25);           // fcmlt v24.4S, v25.4S, #0.0
__ fcm(Assembler::LT, v3, __ T2D, v4);             // fcmlt v3.2D, v4.2D, #0.0
__ fcm(Assembler::LE, v24, __ T2S, v25);           // fcmle v24.2S, v25.2S, #0.0
__ fcm(Assembler::LE, v26, __ T4S, v27);           // fcmle v26.4S, v27.4S, #0.0
__ fcm(Assembler::LE, v23, __ T2D, v24);           // fcmle v23.2D, v24.2D, #0.0

// TwoRegNEONOp
__ absr(v15, __ T8B, v16);                         // abs v15.8B, v16.8B
__ absr(v21, __ T16B, v22);                        // abs v21.16B, v22.16B
__ absr(v3, __ T4H, v4);                           // abs v3.4H, v4.4H
__ absr(v24, __ T8H, v25);                         // abs v24.8H, v25.8H
__ absr(v8, __ T2S, v9);                           // abs v8.2S, v9.2S
__ absr(v25, __ T4S, v26);                         // abs v25.4S, v26.4S
__ absr(v20, __ T2D, v21);                         // abs v20.2D, v21.2D
__ fabs(v16, __ T2S, v17);                         // fabs v16.2S, v17.2S
__ fabs(v17, __ T4S, v18);                         // fabs v17.4S, v18.4S
__ fabs(v2, __ T2D, v3);                           // fabs v2.2D, v3.2D
__ fabs(v1, __ T4H, v2);                           // fabs v1.4H, v2.4H
__ fabs(v0, __ T8H, v1);                           // fabs v0.8H, v1.8H
__ fneg(v24, __ T2S, v25);                         // fneg v24.2S, v25.2S
__ fneg(v4, __ T4S, v5);                           // fneg v4.4S, v5.4S
__ fneg(v3, __ T2D, v4);                           // fneg v3.2D, v4.2D
__ fneg(v12, __ T4H, v13);                         // fneg v12.4H, v13.4H
__ fneg(v31, __ T8H, v0);                          // fneg v31.8H, v0.8H
__ fsqrt(v28, __ T2S, v29);                        // fsqrt v28.2S, v29.2S
__ fsqrt(v10, __ T4S, v11);                        // fsqrt v10.4S, v11.4S
__ fsqrt(v26, __ T2D, v27);                        // fsqrt v26.2D, v27.2D
__ fsqrt(v2, __ T4H, v3);                          // fsqrt v2.4H, v3.4H
__ fsqrt(v12, __ T8H, v13);                        // fsqrt v12.8H, v13.8H
__ notr(v18, __ T8B, v19);                         // not v18.8B, v19.8B
__ notr(v31, __ T16B, v0);                         // not v31.16B, v0.16B

// ThreeRegNEONOp
__ andr(v1, __ T8B, v2, v3);                       // and v1.8B, v2.8B, v3.8B
__ andr(v13, __ T16B, v14, v15);                   // and v13.16B, v14.16B, v15.16B
__ orr(v29, __ T8B, v30, v31);                     // orr v29.8B, v30.8B, v31.8B
__ orr(v0, __ T16B, v1, v2);                       // orr v0.16B, v1.16B, v2.16B
__ eor(v19, __ T8B, v20, v21);                     // eor v19.8B, v20.8B, v21.8B
__ eor(v12, __ T16B, v13, v14);                    // eor v12.16B, v13.16B, v14.16B
__ addv(v17, __ T8B, v18, v19);                    // add v17.8B, v18.8B, v19.8B
__ addv(v22, __ T16B, v23, v24);                   // add v22.16B, v23.16B, v24.16B
__ addv(v13, __ T4H, v14, v15);                    // add v13.4H, v14.4H, v15.4H
__ addv(v28, __ T8H, v29, v30);                    // add v28.8H, v29.8H, v30.8H
__ addv(v30, __ T2S, v31, v0);                     // add v30.2S, v31.2S, v0.2S
__ addv(v31, __ T4S, v0, v1);                      // add v31.4S, v0.4S, v1.4S
__ addv(v1, __ T2D, v2, v3);                       // add v1.2D, v2.2D, v3.2D
__ sqaddv(v26, __ T8B, v27, v28);                  // sqadd v26.8B, v27.8B, v28.8B
__ sqaddv(v28, __ T16B, v29, v30);                 // sqadd v28.16B, v29.16B, v30.16B
__ sqaddv(v4, __ T4H, v5, v6);                     // sqadd v4.4H, v5.4H, v6.4H
__ sqaddv(v30, __ T8H, v31, v0);                   // sqadd v30.8H, v31.8H, v0.8H
__ sqaddv(v4, __ T2S, v5, v6);                     // sqadd v4.2S, v5.2S, v6.2S
__ sqaddv(v6, __ T4S, v7, v8);                     // sqadd v6.4S, v7.4S, v8.4S
__ sqaddv(v30, __ T2D, v31, v0);                   // sqadd v30.2D, v31.2D, v0.2D
__ uqaddv(v26, __ T8B, v27, v28);                  // uqadd v26.8B, v27.8B, v28.8B
__ uqaddv(v18, __ T16B, v19, v20);                 // uqadd v18.16B, v19.16B, v20.16B
__ uqaddv(v9, __ T4H, v10, v11);                   // uqadd v9.4H, v10.4H, v11.4H
__ uqaddv(v8, __ T8H, v9, v10);                    // uqadd v8.8H, v9.8H, v10.8H
__ uqaddv(v12, __ T2S, v13, v14);                  // uqadd v12.2S, v13.2S, v14.2S
__ uqaddv(v0, __ T4S, v1, v2);                     // uqadd v0.4S, v1.4S, v2.4S
__ uqaddv(v20, __ T2D, v21, v22);                  // uqadd v20.2D, v21.2D, v22.2D
__ fadd(v1, __ T2S, v2, v3);                       // fadd v1.2S, v2.2S, v3.2S
__ fadd(v24, __ T4S, v25, v26);                    // fadd v24.4S, v25.4S, v26.4S
__ fadd(v2, __ T2D, v3, v4);                       // fadd v2.2D, v3.2D, v4.2D
__ fadd(v0, __ T4H, v1, v2);                       // fadd v0.4H, v1.4H, v2.4H
__ fadd(v9, __ T8H, v10, v11);                     // fadd v9.8H, v10.8H, v11.8H
__ subv(v24, __ T8B, v25, v26);                    // sub v24.8B, v25.8B, v26.8B
__ subv(v26, __ T16B, v27, v28);                   // sub v26.16B, v27.16B, v28.16B
__ subv(v16, __ T4H, v17, v18);                    // sub v16.4H, v17.4H, v18.4H
__ subv(v30, __ T8H, v31, v0);                     // sub v30.8H, v31.8H, v0.8H
__ subv(v3, __ T2S, v4, v5);                       // sub v3.2S, v4.2S, v5.2S
__ subv(v10, __ T4S, v11, v12);                    // sub v10.4S, v11.4S, v12.4S
__ subv(v23, __ T2D, v24, v25);                    // sub v23.2D, v24.2D, v25.2D
__ sqsubv(v10, __ T8B, v11, v12);                  //
sqsub v10.8B, v11.8B, v12.8B 725 __ sqsubv(v4, __ T16B, v5, v6); // sqsub v4.16B, v5.16B, v6.16B 726 __ sqsubv(v18, __ T4H, v19, v20); // sqsub v18.4H, v19.4H, v20.4H 727 __ sqsubv(v2, __ T8H, v3, v4); // sqsub v2.8H, v3.8H, v4.8H 728 __ sqsubv(v11, __ T2S, v12, v13); // sqsub v11.2S, v12.2S, v13.2S 729 __ sqsubv(v8, __ T4S, v9, v10); // sqsub v8.4S, v9.4S, v10.4S 730 __ sqsubv(v10, __ T2D, v11, v12); // sqsub v10.2D, v11.2D, v12.2D 731 __ uqsubv(v15, __ T8B, v16, v17); // uqsub v15.8B, v16.8B, v17.8B 732 __ uqsubv(v17, __ T16B, v18, v19); // uqsub v17.16B, v18.16B, v19.16B 733 __ uqsubv(v2, __ T4H, v3, v4); // uqsub v2.4H, v3.4H, v4.4H 734 __ uqsubv(v10, __ T8H, v11, v12); // uqsub v10.8H, v11.8H, v12.8H 735 __ uqsubv(v12, __ T2S, v13, v14); // uqsub v12.2S, v13.2S, v14.2S 736 __ uqsubv(v12, __ T4S, v13, v14); // uqsub v12.4S, v13.4S, v14.4S 737 __ uqsubv(v15, __ T2D, v16, v17); // uqsub v15.2D, v16.2D, v17.2D 738 __ fsub(v13, __ T2S, v14, v15); // fsub v13.2S, v14.2S, v15.2S 739 __ fsub(v2, __ T4S, v3, v4); // fsub v2.4S, v3.4S, v4.4S 740 __ fsub(v7, __ T2D, v8, v9); // fsub v7.2D, v8.2D, v9.2D 741 __ fsub(v20, __ T4H, v21, v22); // fsub v20.4H, v21.4H, v22.4H 742 __ fsub(v26, __ T8H, v27, v28); // fsub v26.8H, v27.8H, v28.8H 743 __ mulv(v16, __ T8B, v17, v18); // mul v16.8B, v17.8B, v18.8B 744 __ mulv(v4, __ T16B, v5, v6); // mul v4.16B, v5.16B, v6.16B 745 __ mulv(v2, __ T4H, v3, v4); // mul v2.4H, v3.4H, v4.4H 746 __ mulv(v4, __ T8H, v5, v6); // mul v4.8H, v5.8H, v6.8H 747 __ mulv(v12, __ T2S, v13, v14); // mul v12.2S, v13.2S, v14.2S 748 __ mulv(v18, __ T4S, v19, v20); // mul v18.4S, v19.4S, v20.4S 749 __ fabd(v21, __ T2S, v22, v23); // fabd v21.2S, v22.2S, v23.2S 750 __ fabd(v16, __ T4S, v17, v18); // fabd v16.4S, v17.4S, v18.4S 751 __ fabd(v18, __ T2D, v19, v20); // fabd v18.2D, v19.2D, v20.2D 752 __ fabd(v11, __ T4H, v12, v13); // fabd v11.4H, v12.4H, v13.4H 753 __ fabd(v21, __ T8H, v22, v23); // fabd v21.8H, v22.8H, v23.8H 754 __ faddp(v23, __ T2S, v24, 
v25); // faddp v23.2S, v24.2S, v25.2S 755 __ faddp(v12, __ T4S, v13, v14); // faddp v12.4S, v13.4S, v14.4S 756 __ faddp(v26, __ T2D, v27, v28); // faddp v26.2D, v27.2D, v28.2D 757 __ faddp(v23, __ T4H, v24, v25); // faddp v23.4H, v24.4H, v25.4H 758 __ faddp(v28, __ T8H, v29, v30); // faddp v28.8H, v29.8H, v30.8H 759 __ fmul(v14, __ T2S, v15, v16); // fmul v14.2S, v15.2S, v16.2S 760 __ fmul(v11, __ T4S, v12, v13); // fmul v11.4S, v12.4S, v13.4S 761 __ fmul(v24, __ T2D, v25, v26); // fmul v24.2D, v25.2D, v26.2D 762 __ fmul(v1, __ T4H, v2, v3); // fmul v1.4H, v2.4H, v3.4H 763 __ fmul(v12, __ T8H, v13, v14); // fmul v12.8H, v13.8H, v14.8H 764 __ mlav(v31, __ T4H, v0, v1); // mla v31.4H, v0.4H, v1.4H 765 __ mlav(v10, __ T8H, v11, v12); // mla v10.8H, v11.8H, v12.8H 766 __ mlav(v16, __ T2S, v17, v18); // mla v16.2S, v17.2S, v18.2S 767 __ mlav(v7, __ T4S, v8, v9); // mla v7.4S, v8.4S, v9.4S 768 __ fmla(v2, __ T2S, v3, v4); // fmla v2.2S, v3.2S, v4.2S 769 __ fmla(v3, __ T4S, v4, v5); // fmla v3.4S, v4.4S, v5.4S 770 __ fmla(v13, __ T2D, v14, v15); // fmla v13.2D, v14.2D, v15.2D 771 __ fmla(v19, __ T4H, v20, v21); // fmla v19.4H, v20.4H, v21.4H 772 __ fmla(v17, __ T8H, v18, v19); // fmla v17.8H, v18.8H, v19.8H 773 __ mlsv(v16, __ T4H, v17, v18); // mls v16.4H, v17.4H, v18.4H 774 __ mlsv(v3, __ T8H, v4, v5); // mls v3.8H, v4.8H, v5.8H 775 __ mlsv(v1, __ T2S, v2, v3); // mls v1.2S, v2.2S, v3.2S 776 __ mlsv(v11, __ T4S, v12, v13); // mls v11.4S, v12.4S, v13.4S 777 __ fmls(v30, __ T2S, v31, v0); // fmls v30.2S, v31.2S, v0.2S 778 __ fmls(v5, __ T4S, v6, v7); // fmls v5.4S, v6.4S, v7.4S 779 __ fmls(v8, __ T2D, v9, v10); // fmls v8.2D, v9.2D, v10.2D 780 __ fmls(v15, __ T4H, v16, v17); // fmls v15.4H, v16.4H, v17.4H 781 __ fmls(v29, __ T8H, v30, v31); // fmls v29.8H, v30.8H, v31.8H 782 __ fdiv(v30, __ T2S, v31, v0); // fdiv v30.2S, v31.2S, v0.2S 783 __ fdiv(v0, __ T4S, v1, v2); // fdiv v0.4S, v1.4S, v2.4S 784 __ fdiv(v20, __ T2D, v21, v22); // fdiv v20.2D, v21.2D, v22.2D 785 __ 
fdiv(v7, __ T4H, v8, v9); // fdiv v7.4H, v8.4H, v9.4H 786 __ fdiv(v20, __ T8H, v21, v22); // fdiv v20.8H, v21.8H, v22.8H 787 __ maxv(v23, __ T8B, v24, v25); // smax v23.8B, v24.8B, v25.8B 788 __ maxv(v28, __ T16B, v29, v30); // smax v28.16B, v29.16B, v30.16B 789 __ maxv(v21, __ T4H, v22, v23); // smax v21.4H, v22.4H, v23.4H 790 __ maxv(v27, __ T8H, v28, v29); // smax v27.8H, v28.8H, v29.8H 791 __ maxv(v25, __ T2S, v26, v27); // smax v25.2S, v26.2S, v27.2S 792 __ maxv(v5, __ T4S, v6, v7); // smax v5.4S, v6.4S, v7.4S 793 __ umaxv(v1, __ T8B, v2, v3); // umax v1.8B, v2.8B, v3.8B 794 __ umaxv(v23, __ T16B, v24, v25); // umax v23.16B, v24.16B, v25.16B 795 __ umaxv(v16, __ T4H, v17, v18); // umax v16.4H, v17.4H, v18.4H 796 __ umaxv(v31, __ T8H, v0, v1); // umax v31.8H, v0.8H, v1.8H 797 __ umaxv(v5, __ T2S, v6, v7); // umax v5.2S, v6.2S, v7.2S 798 __ umaxv(v12, __ T4S, v13, v14); // umax v12.4S, v13.4S, v14.4S 799 __ smaxp(v9, __ T8B, v10, v11); // smaxp v9.8B, v10.8B, v11.8B 800 __ smaxp(v28, __ T16B, v29, v30); // smaxp v28.16B, v29.16B, v30.16B 801 __ smaxp(v15, __ T4H, v16, v17); // smaxp v15.4H, v16.4H, v17.4H 802 __ smaxp(v29, __ T8H, v30, v31); // smaxp v29.8H, v30.8H, v31.8H 803 __ smaxp(v22, __ T2S, v23, v24); // smaxp v22.2S, v23.2S, v24.2S 804 __ smaxp(v31, __ T4S, v0, v1); // smaxp v31.4S, v0.4S, v1.4S 805 __ fmax(v19, __ T2S, v20, v21); // fmax v19.2S, v20.2S, v21.2S 806 __ fmax(v31, __ T4S, v0, v1); // fmax v31.4S, v0.4S, v1.4S 807 __ fmax(v5, __ T2D, v6, v7); // fmax v5.2D, v6.2D, v7.2D 808 __ fmax(v14, __ T4H, v15, v16); // fmax v14.4H, v15.4H, v16.4H 809 __ fmax(v18, __ T8H, v19, v20); // fmax v18.8H, v19.8H, v20.8H 810 __ minv(v31, __ T8B, v0, v1); // smin v31.8B, v0.8B, v1.8B 811 __ minv(v18, __ T16B, v19, v20); // smin v18.16B, v19.16B, v20.16B 812 __ minv(v27, __ T4H, v28, v29); // smin v27.4H, v28.4H, v29.4H 813 __ minv(v20, __ T8H, v21, v22); // smin v20.8H, v21.8H, v22.8H 814 __ minv(v16, __ T2S, v17, v18); // smin v16.2S, v17.2S, v18.2S 815 __ 
minv(v12, __ T4S, v13, v14); // smin v12.4S, v13.4S, v14.4S 816 __ uminv(v11, __ T8B, v12, v13); // umin v11.8B, v12.8B, v13.8B 817 __ uminv(v9, __ T16B, v10, v11); // umin v9.16B, v10.16B, v11.16B 818 __ uminv(v6, __ T4H, v7, v8); // umin v6.4H, v7.4H, v8.4H 819 __ uminv(v30, __ T8H, v31, v0); // umin v30.8H, v31.8H, v0.8H 820 __ uminv(v17, __ T2S, v18, v19); // umin v17.2S, v18.2S, v19.2S 821 __ uminv(v27, __ T4S, v28, v29); // umin v27.4S, v28.4S, v29.4S 822 __ sminp(v28, __ T8B, v29, v30); // sminp v28.8B, v29.8B, v30.8B 823 __ sminp(v30, __ T16B, v31, v0); // sminp v30.16B, v31.16B, v0.16B 824 __ sminp(v7, __ T4H, v8, v9); // sminp v7.4H, v8.4H, v9.4H 825 __ sminp(v10, __ T8H, v11, v12); // sminp v10.8H, v11.8H, v12.8H 826 __ sminp(v20, __ T2S, v21, v22); // sminp v20.2S, v21.2S, v22.2S 827 __ sminp(v10, __ T4S, v11, v12); // sminp v10.4S, v11.4S, v12.4S 828 __ sqdmulh(v4, __ T4H, v5, v6); // sqdmulh v4.4H, v5.4H, v6.4H 829 __ sqdmulh(v24, __ T8H, v25, v26); // sqdmulh v24.8H, v25.8H, v26.8H 830 __ sqdmulh(v17, __ T2S, v18, v19); // sqdmulh v17.2S, v18.2S, v19.2S 831 __ sqdmulh(v17, __ T4S, v18, v19); // sqdmulh v17.4S, v18.4S, v19.4S 832 __ shsubv(v22, __ T8B, v23, v24); // shsub v22.8B, v23.8B, v24.8B 833 __ shsubv(v3, __ T16B, v4, v5); // shsub v3.16B, v4.16B, v5.16B 834 __ shsubv(v29, __ T4H, v30, v31); // shsub v29.4H, v30.4H, v31.4H 835 __ shsubv(v15, __ T8H, v16, v17); // shsub v15.8H, v16.8H, v17.8H 836 __ shsubv(v22, __ T2S, v23, v24); // shsub v22.2S, v23.2S, v24.2S 837 __ shsubv(v19, __ T4S, v20, v21); // shsub v19.4S, v20.4S, v21.4S 838 __ fmin(v19, __ T2S, v20, v21); // fmin v19.2S, v20.2S, v21.2S 839 __ fmin(v22, __ T4S, v23, v24); // fmin v22.4S, v23.4S, v24.4S 840 __ fmin(v2, __ T2D, v3, v4); // fmin v2.2D, v3.2D, v4.2D 841 __ fmin(v15, __ T4H, v16, v17); // fmin v15.4H, v16.4H, v17.4H 842 __ fmin(v6, __ T8H, v7, v8); // fmin v6.8H, v7.8H, v8.8H 843 __ facgt(v12, __ T2S, v13, v14); // facgt v12.2S, v13.2S, v14.2S 844 __ facgt(v16, __ T4S, v17, 
v18); // facgt v16.4S, v17.4S, v18.4S 845 __ facgt(v11, __ T2D, v12, v13); // facgt v11.2D, v12.2D, v13.2D 846 __ facgt(v13, __ T4H, v14, v15); // facgt v13.4H, v14.4H, v15.4H 847 __ facgt(v23, __ T8H, v24, v25); // facgt v23.8H, v24.8H, v25.8H 848 849 // VectorScalarNEONInstruction 850 __ fmlavs(v15, __ T2S, v0, v1, 0); // fmla v15.2S, v0.2S, v1.S[0] 851 __ mulvs(v2, __ T4S, v3, v4, 2); // mul v2.4S, v3.4S, v4.S[2] 852 __ fmlavs(v1, __ T2D, v2, v3, 1); // fmla v1.2D, v2.2D, v3.D[1] 853 __ fmlsvs(v11, __ T2S, v12, v13, 1); // fmls v11.2S, v12.2S, v13.S[1] 854 __ mulvs(v5, __ T4S, v6, v7, 1); // mul v5.4S, v6.4S, v7.S[1] 855 __ fmlsvs(v14, __ T2D, v15, v16, 1); // fmls v14.2D, v15.2D, v16.D[1] 856 __ fmulxvs(v6, __ T2S, v7, v8, 1); // fmulx v6.2S, v7.2S, v8.S[1] 857 __ mulvs(v1, __ T4S, v2, v3, 3); // mul v1.4S, v2.4S, v3.S[3] 858 __ fmulxvs(v15, __ T2D, v0, v1, 0); // fmulx v15.2D, v0.2D, v1.D[0] 859 __ mulvs(v9, __ T4H, v10, v11, 3); // mul v9.4H, v10.4H, v11.H[3] 860 __ mulvs(v4, __ T8H, v5, v6, 4); // mul v4.8H, v5.8H, v6.H[4] 861 __ mulvs(v13, __ T2S, v14, v15, 1); // mul v13.2S, v14.2S, v15.S[1] 862 __ mulvs(v3, __ T4S, v4, v5, 1); // mul v3.4S, v4.4S, v5.S[1] 863 864 // NEONVectorCompare 865 __ cm(Assembler::GT, v21, __ T8B, v22, v23); // cmgt v21.8B, v22.8B, v23.8B 866 __ cm(Assembler::GT, v23, __ T16B, v24, v25); // cmgt v23.16B, v24.16B, v25.16B 867 __ cm(Assembler::GT, v31, __ T4H, v0, v1); // cmgt v31.4H, v0.4H, v1.4H 868 __ cm(Assembler::GT, v25, __ T8H, v26, v27); // cmgt v25.8H, v26.8H, v27.8H 869 __ cm(Assembler::GT, v2, __ T2S, v3, v4); // cmgt v2.2S, v3.2S, v4.2S 870 __ cm(Assembler::GT, v31, __ T4S, v0, v1); // cmgt v31.4S, v0.4S, v1.4S 871 __ cm(Assembler::GT, v27, __ T2D, v28, v29); // cmgt v27.2D, v28.2D, v29.2D 872 __ cm(Assembler::GE, v18, __ T8B, v19, v20); // cmge v18.8B, v19.8B, v20.8B 873 __ cm(Assembler::GE, v10, __ T16B, v11, v12); // cmge v10.16B, v11.16B, v12.16B 874 __ cm(Assembler::GE, v23, __ T4H, v24, v25); // cmge v23.4H, v24.4H, 
v25.4H 875 __ cm(Assembler::GE, v19, __ T8H, v20, v21); // cmge v19.8H, v20.8H, v21.8H 876 __ cm(Assembler::GE, v3, __ T2S, v4, v5); // cmge v3.2S, v4.2S, v5.2S 877 __ cm(Assembler::GE, v18, __ T4S, v19, v20); // cmge v18.4S, v19.4S, v20.4S 878 __ cm(Assembler::GE, v0, __ T2D, v1, v2); // cmge v0.2D, v1.2D, v2.2D 879 __ cm(Assembler::EQ, v25, __ T8B, v26, v27); // cmeq v25.8B, v26.8B, v27.8B 880 __ cm(Assembler::EQ, v26, __ T16B, v27, v28); // cmeq v26.16B, v27.16B, v28.16B 881 __ cm(Assembler::EQ, v23, __ T4H, v24, v25); // cmeq v23.4H, v24.4H, v25.4H 882 __ cm(Assembler::EQ, v2, __ T8H, v3, v4); // cmeq v2.8H, v3.8H, v4.8H 883 __ cm(Assembler::EQ, v18, __ T2S, v19, v20); // cmeq v18.2S, v19.2S, v20.2S 884 __ cm(Assembler::EQ, v12, __ T4S, v13, v14); // cmeq v12.4S, v13.4S, v14.4S 885 __ cm(Assembler::EQ, v4, __ T2D, v5, v6); // cmeq v4.2D, v5.2D, v6.2D 886 __ cm(Assembler::HI, v28, __ T8B, v29, v30); // cmhi v28.8B, v29.8B, v30.8B 887 __ cm(Assembler::HI, v30, __ T16B, v31, v0); // cmhi v30.16B, v31.16B, v0.16B 888 __ cm(Assembler::HI, v29, __ T4H, v30, v31); // cmhi v29.4H, v30.4H, v31.4H 889 __ cm(Assembler::HI, v16, __ T8H, v17, v18); // cmhi v16.8H, v17.8H, v18.8H 890 __ cm(Assembler::HI, v27, __ T2S, v28, v29); // cmhi v27.2S, v28.2S, v29.2S 891 __ cm(Assembler::HI, v6, __ T4S, v7, v8); // cmhi v6.4S, v7.4S, v8.4S 892 __ cm(Assembler::HI, v9, __ T2D, v10, v11); // cmhi v9.2D, v10.2D, v11.2D 893 __ cm(Assembler::HS, v29, __ T8B, v30, v31); // cmhs v29.8B, v30.8B, v31.8B 894 __ cm(Assembler::HS, v18, __ T16B, v19, v20); // cmhs v18.16B, v19.16B, v20.16B 895 __ cm(Assembler::HS, v7, __ T4H, v8, v9); // cmhs v7.4H, v8.4H, v9.4H 896 __ cm(Assembler::HS, v4, __ T8H, v5, v6); // cmhs v4.8H, v5.8H, v6.8H 897 __ cm(Assembler::HS, v7, __ T2S, v8, v9); // cmhs v7.2S, v8.2S, v9.2S 898 __ cm(Assembler::HS, v15, __ T4S, v16, v17); // cmhs v15.4S, v16.4S, v17.4S 899 __ cm(Assembler::HS, v9, __ T2D, v10, v11); // cmhs v9.2D, v10.2D, v11.2D 900 __ fcm(Assembler::EQ, v23, __ 
T2S, v24, v25); // fcmeq v23.2S, v24.2S, v25.2S 901 __ fcm(Assembler::EQ, v8, __ T4S, v9, v10); // fcmeq v8.4S, v9.4S, v10.4S 902 __ fcm(Assembler::EQ, v2, __ T2D, v3, v4); // fcmeq v2.2D, v3.2D, v4.2D 903 __ fcm(Assembler::GT, v28, __ T2S, v29, v30); // fcmgt v28.2S, v29.2S, v30.2S 904 __ fcm(Assembler::GT, v21, __ T4S, v22, v23); // fcmgt v21.4S, v22.4S, v23.4S 905 __ fcm(Assembler::GT, v31, __ T2D, v0, v1); // fcmgt v31.2D, v0.2D, v1.2D 906 __ fcm(Assembler::GE, v5, __ T2S, v6, v7); // fcmge v5.2S, v6.2S, v7.2S 907 __ fcm(Assembler::GE, v27, __ T4S, v28, v29); // fcmge v27.4S, v28.4S, v29.4S 908 __ fcm(Assembler::GE, v0, __ T2D, v1, v2); // fcmge v0.2D, v1.2D, v2.2D 909 910 // SVEComparisonWithZero 911 __ sve_fcm(Assembler::EQ, p8, __ S, p6, z15, 0.0); // fcmeq p8.s, p6/z, z15.s, #0.0 912 __ sve_fcm(Assembler::GT, p4, __ D, p6, z28, 0.0); // fcmgt p4.d, p6/z, z28.d, #0.0 913 __ sve_fcm(Assembler::GE, p13, __ D, p0, z25, 0.0); // fcmge p13.d, p0/z, z25.d, #0.0 914 __ sve_fcm(Assembler::LT, p2, __ D, p0, z6, 0.0); // fcmlt p2.d, p0/z, z6.d, #0.0 915 __ sve_fcm(Assembler::LE, p2, __ S, p2, z15, 0.0); // fcmle p2.s, p2/z, z15.s, #0.0 916 __ sve_fcm(Assembler::NE, p3, __ S, p7, z5, 0.0); // fcmne p3.s, p7/z, z5.s, #0.0 917 918 // SVEComparisonWithImm 919 __ sve_cmp(Assembler::EQ, p3, __ S, p5, z20, -10); // cmpeq p3.s, p5/z, z20.s, #-10 920 __ sve_cmp(Assembler::GT, p5, __ S, p7, z8, -10); // cmpgt p5.s, p7/z, z8.s, #-10 921 __ sve_cmp(Assembler::GE, p8, __ H, p7, z2, 13); // cmpge p8.h, p7/z, z2.h, #13 922 __ sve_cmp(Assembler::LT, p1, __ S, p7, z27, -2); // cmplt p1.s, p7/z, z27.s, #-2 923 __ sve_cmp(Assembler::LE, p6, __ S, p6, z28, -11); // cmple p6.s, p6/z, z28.s, #-11 924 __ sve_cmp(Assembler::NE, p1, __ H, p4, z14, -5); // cmpne p1.h, p4/z, z14.h, #-5 925 __ sve_cmp(Assembler::HS, p13, __ H, p1, z23, 90); // cmphs p13.h, p1/z, z23.h, #90 926 __ sve_cmp(Assembler::HI, p8, __ B, p4, z4, 66); // cmphi p8.b, p4/z, z4.b, #66 927 __ sve_cmp(Assembler::LS, p9, __ H, 
p3, z13, 11); // cmpls p9.h, p3/z, z13.h, #11 928 __ sve_cmp(Assembler::LO, p8, __ S, p5, z3, 21); // cmplo p8.s, p5/z, z3.s, #21 929 930 // SpecialCases 931 __ ccmn(zr, zr, 3u, Assembler::LE); // ccmn xzr, xzr, #3, LE 932 __ ccmnw(zr, zr, 5u, Assembler::EQ); // ccmn wzr, wzr, #5, EQ 933 __ ccmp(zr, 1, 4u, Assembler::NE); // ccmp xzr, 1, #4, NE 934 __ ccmpw(zr, 2, 2, Assembler::GT); // ccmp wzr, 2, #2, GT 935 __ extr(zr, zr, zr, 0); // extr xzr, xzr, xzr, 0 936 __ stlxp(r0, zr, zr, sp); // stlxp w0, xzr, xzr, [sp] 937 __ stlxpw(r2, zr, zr, r3); // stlxp w2, wzr, wzr, [x3] 938 __ stxp(r4, zr, zr, r5); // stxp w4, xzr, xzr, [x5] 939 __ stxpw(r6, zr, zr, sp); // stxp w6, wzr, wzr, [sp] 940 __ dup(v0, __ T16B, zr); // dup v0.16b, wzr 941 __ dup(v0, __ S, v1); // dup s0, v1.s[0] 942 __ mov(v1, __ D, 0, zr); // mov v1.d[0], xzr 943 __ mov(v1, __ S, 1, zr); // mov v1.s[1], wzr 944 __ mov(v1, __ H, 2, zr); // mov v1.h[2], wzr 945 __ mov(v1, __ B, 3, zr); // mov v1.b[3], wzr 946 __ smov(r0, v1, __ S, 0); // smov x0, v1.s[0] 947 __ smov(r0, v1, __ H, 1); // smov x0, v1.h[1] 948 __ smov(r0, v1, __ B, 2); // smov x0, v1.b[2] 949 __ umov(r0, v1, __ D, 0); // umov x0, v1.d[0] 950 __ umov(r0, v1, __ S, 1); // umov w0, v1.s[1] 951 __ umov(r0, v1, __ H, 2); // umov w0, v1.h[2] 952 __ umov(r0, v1, __ B, 3); // umov w0, v1.b[3] 953 __ fmovhid(r0, v1); // fmov x0, v1.d[1] 954 __ fmovs(v9, __ T2S, 0.5f); // fmov v9.2s, 0.5 955 __ fmovd(v14, __ T2D, 0.5f); // fmov v14.2d, 0.5 956 __ ld1(v31, v0, __ T2D, Address(__ post(r1, r0))); // ld1 {v31.2d, v0.2d}, [x1], x0 957 __ fcvtzs(v0, __ T2S, v1); // fcvtzs v0.2s, v1.2s 958 __ fcvtzs(v0, __ T4H, v1); // fcvtzs v0.4h, v1.4h 959 __ fcvtzs(v0, __ T8H, v1); // fcvtzs v0.8h, v1.8h 960 __ fcvtas(v2, __ T4S, v3); // fcvtas v2.4s, v3.4s 961 __ fcvtas(v2, __ T4H, v3); // fcvtas v2.4h, v3.4h 962 __ fcvtas(v2, __ T8H, v3); // fcvtas v2.8h, v3.8h 963 __ fcvtms(v4, __ T2D, v5); // fcvtms v4.2d, v5.2d 964 __ fcvtms(v4, __ T4H, v5); // fcvtms v4.4h, v5.4h 
965 __ fcvtms(v4, __ T8H, v5); // fcvtms v4.8h, v5.8h 966 __ sve_cpy(z0, __ S, p0, v1); // mov z0.s, p0/m, s1 967 __ sve_cpy(z0, __ B, p0, 127, true); // mov z0.b, p0/m, 127 968 __ sve_cpy(z1, __ H, p0, -128, true); // mov z1.h, p0/m, -128 969 __ sve_cpy(z2, __ S, p0, 32512, true); // mov z2.s, p0/m, 32512 970 __ sve_cpy(z5, __ D, p0, -32768, false); // mov z5.d, p0/z, -32768 971 __ sve_cpy(z10, __ B, p0, -1, false); // mov z10.b, p0/z, -1 972 __ sve_cpy(z11, __ S, p0, -1, false); // mov z11.s, p0/z, -1 973 __ sve_inc(r0, __ S); // incw x0 974 __ sve_dec(r1, __ H); // dech x1 975 __ sve_lsl(z0, __ B, z1, 7); // lsl z0.b, z1.b, #7 976 __ sve_lsl(z21, __ H, z1, 15); // lsl z21.h, z1.h, #15 977 __ sve_lsl(z0, __ S, z1, 31); // lsl z0.s, z1.s, #31 978 __ sve_lsl(z0, __ D, z1, 63); // lsl z0.d, z1.d, #63 979 __ sve_lsr(z0, __ B, z1, 7); // lsr z0.b, z1.b, #7 980 __ sve_asr(z0, __ H, z11, 15); // asr z0.h, z11.h, #15 981 __ sve_lsr(z30, __ S, z1, 31); // lsr z30.s, z1.s, #31 982 __ sve_asr(z0, __ D, z1, 63); // asr z0.d, z1.d, #63 983 __ sve_lsl(z0, __ B, p0, 0); // lsl z0.b, p0/m, z0.b, #0 984 __ sve_lsl(z0, __ B, p0, 5); // lsl z0.b, p0/m, z0.b, #5 985 __ sve_lsl(z1, __ H, p1, 15); // lsl z1.h, p1/m, z1.h, #15 986 __ sve_lsl(z2, __ S, p2, 31); // lsl z2.s, p2/m, z2.s, #31 987 __ sve_lsl(z3, __ D, p3, 63); // lsl z3.d, p3/m, z3.d, #63 988 __ sve_lsr(z0, __ B, p0, 1); // lsr z0.b, p0/m, z0.b, #1 989 __ sve_lsr(z0, __ B, p0, 8); // lsr z0.b, p0/m, z0.b, #8 990 __ sve_lsr(z1, __ H, p1, 15); // lsr z1.h, p1/m, z1.h, #15 991 __ sve_lsr(z2, __ S, p2, 7); // lsr z2.s, p2/m, z2.s, #7 992 __ sve_lsr(z2, __ S, p2, 31); // lsr z2.s, p2/m, z2.s, #31 993 __ sve_lsr(z3, __ D, p3, 63); // lsr z3.d, p3/m, z3.d, #63 994 __ sve_asr(z0, __ B, p0, 1); // asr z0.b, p0/m, z0.b, #1 995 __ sve_asr(z0, __ B, p0, 7); // asr z0.b, p0/m, z0.b, #7 996 __ sve_asr(z1, __ H, p1, 5); // asr z1.h, p1/m, z1.h, #5 997 __ sve_asr(z1, __ H, p1, 15); // asr z1.h, p1/m, z1.h, #15 998 __ sve_asr(z2, __ S, p2, 
31); // asr z2.s, p2/m, z2.s, #31 999 __ sve_asr(z3, __ D, p3, 63); // asr z3.d, p3/m, z3.d, #63 1000 __ sve_addvl(sp, r0, 31); // addvl sp, x0, #31 1001 __ sve_addpl(r1, sp, -32); // addpl x1, sp, -32 1002 __ sve_cntp(r8, __ B, p0, p1); // cntp x8, p0, p1.b 1003 __ sve_dup(z0, __ B, 127); // dup z0.b, 127 1004 __ sve_dup(z1, __ H, -128); // dup z1.h, -128 1005 __ sve_dup(z2, __ S, 32512); // dup z2.s, 32512 1006 __ sve_dup(z7, __ D, -32768); // dup z7.d, -32768 1007 __ sve_dup(z10, __ B, -1); // dup z10.b, -1 1008 __ sve_dup(z11, __ S, -1); // dup z11.s, -1 1009 __ sve_ld1b(z0, __ B, p0, Address(sp)); // ld1b {z0.b}, p0/z, [sp] 1010 __ sve_ld1b(z0, __ H, p1, Address(sp)); // ld1b {z0.h}, p1/z, [sp] 1011 __ sve_ld1b(z0, __ S, p2, Address(sp, r8)); // ld1b {z0.s}, p2/z, [sp, x8] 1012 __ sve_ld1b(z0, __ D, p3, Address(sp, 7)); // ld1b {z0.d}, p3/z, [sp, #7, MUL VL] 1013 __ sve_ld1h(z10, __ H, p1, Address(sp, -8)); // ld1h {z10.h}, p1/z, [sp, #-8, MUL VL] 1014 __ sve_ld1w(z20, __ S, p2, Address(r0, 7)); // ld1w {z20.s}, p2/z, [x0, #7, MUL VL] 1015 __ sve_ld1b(z30, __ B, p3, Address(sp, r8)); // ld1b {z30.b}, p3/z, [sp, x8] 1016 __ sve_ld1w(z0, __ S, p4, Address(sp, r28)); // ld1w {z0.s}, p4/z, [sp, x28, LSL #2] 1017 __ sve_ld1d(z11, __ D, p5, Address(r0, r1)); // ld1d {z11.d}, p5/z, [x0, x1, LSL #3] 1018 __ sve_st1b(z22, __ B, p6, Address(sp)); // st1b {z22.b}, p6, [sp] 1019 __ sve_st1b(z31, __ B, p7, Address(sp, -8)); // st1b {z31.b}, p7, [sp, #-8, MUL VL] 1020 __ sve_st1b(z0, __ H, p1, Address(sp)); // st1b {z0.h}, p1, [sp] 1021 __ sve_st1b(z0, __ S, p2, Address(sp, r8)); // st1b {z0.s}, p2, [sp, x8] 1022 __ sve_st1b(z0, __ D, p3, Address(sp)); // st1b {z0.d}, p3, [sp] 1023 __ sve_st1w(z0, __ S, p1, Address(r0, 7)); // st1w {z0.s}, p1, [x0, #7, MUL VL] 1024 __ sve_st1b(z0, __ B, p2, Address(sp, r1)); // st1b {z0.b}, p2, [sp, x1] 1025 __ sve_st1h(z0, __ H, p3, Address(sp, r8)); // st1h {z0.h}, p3, [sp, x8, LSL #1] 1026 __ sve_st1d(z0, __ D, p4, Address(r0, r17)); // 
st1d {z0.d}, p4, [x0, x17, LSL #3] 1027 __ sve_ldr(z0, Address(sp)); // ldr z0, [sp] 1028 __ sve_ldr(z31, Address(sp, -256)); // ldr z31, [sp, #-256, MUL VL] 1029 __ sve_str(z8, Address(r8, 255)); // str z8, [x8, #255, MUL VL] 1030 __ sve_cntb(r9); // cntb x9 1031 __ sve_cnth(r10); // cnth x10 1032 __ sve_cntw(r11); // cntw x11 1033 __ sve_cntd(r12); // cntd x12 1034 __ sve_brka(p2, p0, p2, false); // brka p2.b, p0/z, p2.b 1035 __ sve_brka(p1, p2, p3, true); // brka p1.b, p2/m, p3.b 1036 __ sve_brkb(p1, p2, p3, false); // brkb p1.b, p2/z, p3.b 1037 __ sve_brkb(p2, p3, p4, true); // brkb p2.b, p3/m, p4.b 1038 __ sve_rev(p0, __ B, p1); // rev p0.b, p1.b 1039 __ sve_rev(p1, __ H, p2); // rev p1.h, p2.h 1040 __ sve_rev(p2, __ S, p3); // rev p2.s, p3.s 1041 __ sve_rev(p3, __ D, p4); // rev p3.d, p4.d 1042 __ sve_incp(r0, __ B, p2); // incp x0, p2.b 1043 __ sve_whilelt(p0, __ B, r1, r28); // whilelt p0.b, x1, x28 1044 __ sve_whilele(p2, __ H, r11, r8); // whilele p2.h, x11, x8 1045 __ sve_whilelo(p3, __ S, r7, r2); // whilelo p3.s, x7, x2 1046 __ sve_whilels(p4, __ D, r17, r10); // whilels p4.d, x17, x10 1047 __ sve_whileltw(p1, __ B, r1, r28); // whilelt p1.b, w1, w28 1048 __ sve_whilelew(p2, __ H, r11, r8); // whilele p2.h, w11, w8 1049 __ sve_whilelow(p3, __ S, r7, r2); // whilelo p3.s, w7, w2 1050 __ sve_whilelsw(p4, __ D, r17, r10); // whilels p4.d, w17, w10 1051 __ sve_sel(z0, __ B, p0, z1, z2); // sel z0.b, p0, z1.b, z2.b 1052 __ sve_sel(z4, __ D, p0, z5, z6); // sel z4.d, p0, z5.d, z6.d 1053 __ sve_cmp(Assembler::EQ, p1, __ B, p0, z0, z1); // cmpeq p1.b, p0/z, z0.b, z1.b 1054 __ sve_cmp(Assembler::NE, p1, __ H, p0, z2, z3); // cmpne p1.h, p0/z, z2.h, z3.h 1055 __ sve_cmp(Assembler::GE, p1, __ S, p2, z4, z5); // cmpge p1.s, p2/z, z4.s, z5.s 1056 __ sve_cmp(Assembler::GT, p1, __ D, p3, z6, z7); // cmpgt p1.d, p3/z, z6.d, z7.d 1057 __ sve_cmp(Assembler::HI, p1, __ S, p2, z4, z5); // cmphi p1.s, p2/z, z4.s, z5.s 1058 __ sve_cmp(Assembler::HS, p1, __ D, p3, z6, z7); 
// cmphs p1.d, p3/z, z6.d, z7.d 1059 __ sve_cmp(Assembler::EQ, p1, __ B, p4, z0, 15); // cmpeq p1.b, p4/z, z0.b, #15 1060 __ sve_cmp(Assembler::NE, p1, __ H, p0, z2, -16); // cmpne p1.h, p0/z, z2.h, #-16 1061 __ sve_cmp(Assembler::LE, p1, __ S, p1, z4, 0); // cmple p1.s, p1/z, z4.s, #0 1062 __ sve_cmp(Assembler::LT, p1, __ D, p2, z6, -1); // cmplt p1.d, p2/z, z6.d, #-1 1063 __ sve_cmp(Assembler::GE, p1, __ S, p3, z4, 5); // cmpge p1.s, p3/z, z4.s, #5 1064 __ sve_cmp(Assembler::GT, p1, __ B, p4, z6, -2); // cmpgt p1.b, p4/z, z6.b, #-2 1065 __ sve_fcm(Assembler::EQ, p1, __ S, p0, z0, z1); // fcmeq p1.s, p0/z, z0.s, z1.s 1066 __ sve_fcm(Assembler::NE, p1, __ D, p0, z2, z3); // fcmne p1.d, p0/z, z2.d, z3.d 1067 __ sve_fcm(Assembler::GT, p1, __ S, p2, z4, z5); // fcmgt p1.s, p2/z, z4.s, z5.s 1068 __ sve_fcm(Assembler::GE, p1, __ D, p3, z6, z7); // fcmge p1.d, p3/z, z6.d, z7.d 1069 __ sve_uunpkhi(z0, __ H, z1); // uunpkhi z0.h, z1.b 1070 __ sve_uunpklo(z4, __ S, z5); // uunpklo z4.s, z5.h 1071 __ sve_sunpkhi(z6, __ D, z7); // sunpkhi z6.d, z7.s 1072 __ sve_sunpklo(z10, __ H, z11); // sunpklo z10.h, z11.b 1073 __ sve_scvtf(z1, __ D, p0, z0, __ S); // scvtf z1.d, p0/m, z0.s 1074 __ sve_scvtf(z3, __ D, p1, z2, __ D); // scvtf z3.d, p1/m, z2.d 1075 __ sve_scvtf(z6, __ S, p2, z1, __ D); // scvtf z6.s, p2/m, z1.d 1076 __ sve_scvtf(z6, __ S, p3, z1, __ S); // scvtf z6.s, p3/m, z1.s 1077 __ sve_scvtf(z6, __ H, p3, z1, __ S); // scvtf z6.h, p3/m, z1.s 1078 __ sve_scvtf(z6, __ H, p3, z1, __ D); // scvtf z6.h, p3/m, z1.d 1079 __ sve_scvtf(z6, __ H, p3, z1, __ H); // scvtf z6.h, p3/m, z1.h 1080 __ sve_fcvt(z5, __ D, p3, z4, __ S); // fcvt z5.d, p3/m, z4.s 1081 __ sve_fcvt(z1, __ S, p3, z0, __ D); // fcvt z1.s, p3/m, z0.d 1082 __ sve_fcvt(z5, __ S, p3, z4, __ H); // fcvt z5.s, p3/m, z4.h 1083 __ sve_fcvt(z1, __ H, p3, z0, __ S); // fcvt z1.h, p3/m, z0.s 1084 __ sve_fcvt(z5, __ D, p3, z4, __ H); // fcvt z5.d, p3/m, z4.h 1085 __ sve_fcvt(z1, __ H, p3, z0, __ D); // fcvt z1.h, p3/m, 
z0.d 1086 __ sve_fcvtzs(z19, __ D, p2, z1, __ D); // fcvtzs z19.d, p2/m, z1.d 1087 __ sve_fcvtzs(z9, __ S, p1, z8, __ S); // fcvtzs z9.s, p1/m, z8.s 1088 __ sve_fcvtzs(z1, __ S, p2, z0, __ D); // fcvtzs z1.s, p2/m, z0.d 1089 __ sve_fcvtzs(z1, __ D, p3, z0, __ S); // fcvtzs z1.d, p3/m, z0.s 1090 __ sve_fcvtzs(z1, __ S, p4, z18, __ H); // fcvtzs z1.s, p4/m, z18.h 1091 __ sve_lasta(r0, __ B, p0, z15); // lasta w0, p0, z15.b 1092 __ sve_lastb(r1, __ B, p1, z16); // lastb w1, p1, z16.b 1093 __ sve_lasta(v0, __ B, p0, z15); // lasta b0, p0, z15.b 1094 __ sve_lastb(v1, __ B, p1, z16); // lastb b1, p1, z16.b 1095 __ sve_index(z6, __ S, 1, 1); // index z6.s, #1, #1 1096 __ sve_index(z6, __ B, r5, 2); // index z6.b, w5, #2 1097 __ sve_index(z6, __ H, r5, 3); // index z6.h, w5, #3 1098 __ sve_index(z6, __ S, r5, 4); // index z6.s, w5, #4 1099 __ sve_index(z7, __ D, r5, 5); // index z7.d, x5, #5 1100 __ sve_cpy(z7, __ H, p3, r5); // cpy z7.h, p3/m, w5 1101 __ sve_tbl(z16, __ S, z17, z18); // tbl z16.s, {z17.s}, z18.s 1102 __ sve_ld1w_gather(z15, p0, r5, z16); // ld1w {z15.s}, p0/z, [x5, z16.s, uxtw #2] 1103 __ sve_ld1d_gather(z15, p0, r5, z16); // ld1d {z15.d}, p0/z, [x5, z16.d, uxtw #3] 1104 __ sve_st1w_scatter(z15, p0, r5, z16); // st1w {z15.s}, p0, [x5, z16.s, uxtw #2] 1105 __ sve_st1d_scatter(z15, p0, r5, z16); // st1d {z15.d}, p0, [x5, z16.d, uxtw #3] 1106 __ sve_and(p0, p1, p2, p3); // and p0.b, p1/z, p2.b, p3.b 1107 __ sve_ands(p4, p5, p6, p0); // ands p4.b, p5/z, p6.b, p0.b 1108 __ sve_eor(p0, p1, p2, p3); // eor p0.b, p1/z, p2.b, p3.b 1109 __ sve_eors(p5, p6, p0, p1); // eors p5.b, p6/z, p0.b, p1.b 1110 __ sve_orr(p0, p1, p2, p3); // orr p0.b, p1/z, p2.b, p3.b 1111 __ sve_orrs(p9, p1, p4, p5); // orrs p9.b, p1/z, p4.b, p5.b 1112 __ sve_bic(p10, p7, p9, p11); // bic p10.b, p7/z, p9.b, p11.b 1113 __ sve_ptest(p7, p1); // ptest p7, p1.b 1114 __ sve_ptrue(p1, __ B); // ptrue p1.b 1115 __ sve_ptrue(p1, __ B, 0b00001); // ptrue p1.b, vl1 1116 __ sve_ptrue(p1, __ B, 
0b00101); // ptrue p1.b, vl5 1117 __ sve_ptrue(p1, __ B, 0b01001); // ptrue p1.b, vl16 1118 __ sve_ptrue(p1, __ B, 0b01101); // ptrue p1.b, vl256 1119 __ sve_ptrue(p2, __ H); // ptrue p2.h 1120 __ sve_ptrue(p2, __ H, 0b00010); // ptrue p2.h, vl2 1121 __ sve_ptrue(p2, __ H, 0b00110); // ptrue p2.h, vl6 1122 __ sve_ptrue(p2, __ H, 0b01010); // ptrue p2.h, vl32 1123 __ sve_ptrue(p3, __ S); // ptrue p3.s 1124 __ sve_ptrue(p3, __ S, 0b00011); // ptrue p3.s, vl3 1125 __ sve_ptrue(p3, __ S, 0b00111); // ptrue p3.s, vl7 1126 __ sve_ptrue(p3, __ S, 0b01011); // ptrue p3.s, vl64 1127 __ sve_ptrue(p4, __ D); // ptrue p4.d 1128 __ sve_ptrue(p4, __ D, 0b00100); // ptrue p4.d, vl4 1129 __ sve_ptrue(p4, __ D, 0b01000); // ptrue p4.d, vl8 1130 __ sve_ptrue(p4, __ D, 0b01100); // ptrue p4.d, vl128 1131 __ sve_pfalse(p7); // pfalse p7.b 1132 __ sve_uzp1(p0, __ B, p0, p1); // uzp1 p0.b, p0.b, p1.b 1133 __ sve_uzp1(p0, __ H, p0, p1); // uzp1 p0.h, p0.h, p1.h 1134 __ sve_uzp1(p0, __ S, p0, p1); // uzp1 p0.s, p0.s, p1.s 1135 __ sve_uzp1(p0, __ D, p0, p1); // uzp1 p0.d, p0.d, p1.d 1136 __ sve_uzp2(p0, __ B, p0, p1); // uzp2 p0.b, p0.b, p1.b 1137 __ sve_uzp2(p0, __ H, p0, p1); // uzp2 p0.h, p0.h, p1.h 1138 __ sve_uzp2(p0, __ S, p0, p1); // uzp2 p0.s, p0.s, p1.s 1139 __ sve_uzp2(p0, __ D, p0, p1); // uzp2 p0.d, p0.d, p1.d 1140 __ sve_punpklo(p1, p0); // punpklo p1.h, p0.b 1141 __ sve_punpkhi(p1, p0); // punpkhi p1.h, p0.b 1142 __ sve_compact(z16, __ S, z16, p1); // compact z16.s, p1, z16.s 1143 __ sve_compact(z16, __ D, z16, p1); // compact z16.d, p1, z16.d 1144 __ sve_ext(z17, z16, 63); // ext z17.b, z17.b, z16.b, #63 1145 __ sve_fac(Assembler::GT, p1, __ H, p2, z4, z5); // facgt p1.h, p2/z, z4.h, z5.h 1146 __ sve_fac(Assembler::GT, p1, __ S, p2, z4, z5); // facgt p1.s, p2/z, z4.s, z5.s 1147 __ sve_fac(Assembler::GT, p1, __ D, p2, z4, z5); // facgt p1.d, p2/z, z4.d, z5.d 1148 __ sve_fac(Assembler::GE, p1, __ H, p2, z4, z5); // facge p1.h, p2/z, z4.h, z5.h 1149 __ sve_fac(Assembler::GE, 
p1, __ S, p2, z4, z5); // facge p1.s, p2/z, z4.s, z5.s 1150 __ sve_fac(Assembler::GE, p1, __ D, p2, z4, z5); // facge p1.d, p2/z, z4.d, z5.d 1151 __ sve_histcnt(z16, __ S, p0, z16, z16); // histcnt z16.s, p0/z, z16.s, z16.s 1152 __ sve_histcnt(z17, __ D, p0, z17, z17); // histcnt z17.d, p0/z, z17.d, z17.d 1153 1154 // FloatImmediateOp 1155 __ fmovd(v0, 2.0); // fmov d0, #2.0 1156 __ fmovd(v0, 2.125); // fmov d0, #2.125 1157 __ fmovd(v0, 4.0); // fmov d0, #4.0 1158 __ fmovd(v0, 4.25); // fmov d0, #4.25 1159 __ fmovd(v0, 8.0); // fmov d0, #8.0 1160 __ fmovd(v0, 8.5); // fmov d0, #8.5 1161 __ fmovd(v0, 16.0); // fmov d0, #16.0 1162 __ fmovd(v0, 17.0); // fmov d0, #17.0 1163 __ fmovd(v0, 0.125); // fmov d0, #0.125 1164 __ fmovd(v0, 0.1328125); // fmov d0, #0.1328125 1165 __ fmovd(v0, 0.25); // fmov d0, #0.25 1166 __ fmovd(v0, 0.265625); // fmov d0, #0.265625 1167 __ fmovd(v0, 0.5); // fmov d0, #0.5 1168 __ fmovd(v0, 0.53125); // fmov d0, #0.53125 1169 __ fmovd(v0, 1.0); // fmov d0, #1.0 1170 __ fmovd(v0, 1.0625); // fmov d0, #1.0625 1171 __ fmovd(v0, -2.0); // fmov d0, #-2.0 1172 __ fmovd(v0, -2.125); // fmov d0, #-2.125 1173 __ fmovd(v0, -4.0); // fmov d0, #-4.0 1174 __ fmovd(v0, -4.25); // fmov d0, #-4.25 1175 __ fmovd(v0, -8.0); // fmov d0, #-8.0 1176 __ fmovd(v0, -8.5); // fmov d0, #-8.5 1177 __ fmovd(v0, -16.0); // fmov d0, #-16.0 1178 __ fmovd(v0, -17.0); // fmov d0, #-17.0 1179 __ fmovd(v0, -0.125); // fmov d0, #-0.125 1180 __ fmovd(v0, -0.1328125); // fmov d0, #-0.1328125 1181 __ fmovd(v0, -0.25); // fmov d0, #-0.25 1182 __ fmovd(v0, -0.265625); // fmov d0, #-0.265625 1183 __ fmovd(v0, -0.5); // fmov d0, #-0.5 1184 __ fmovd(v0, -0.53125); // fmov d0, #-0.53125 1185 __ fmovd(v0, -1.0); // fmov d0, #-1.0 1186 __ fmovd(v0, -1.0625); // fmov d0, #-1.0625 1187 1188 // LSEOp 1189 __ swp(Assembler::xword, r6, r7, r19); // swp x6, x7, [x19] 1190 __ ldadd(Assembler::xword, r13, r28, r17); // ldadd x13, x28, [x17] 1191 __ ldbic(Assembler::xword, r16, r6, r2); // ldclr 
x16, x6, [x2] 1192 __ ldeor(Assembler::xword, r29, r3, r4); // ldeor x29, x3, [x4] 1193 __ ldorr(Assembler::xword, r6, r16, r20); // ldset x6, x16, [x20] 1194 __ ldsmin(Assembler::xword, r13, r12, r20); // ldsmin x13, x12, [x20] 1195 __ ldsmax(Assembler::xword, r8, r25, r20); // ldsmax x8, x25, [x20] 1196 __ ldumin(Assembler::xword, r19, r0, r11); // ldumin x19, x0, [x11] 1197 __ ldumax(Assembler::xword, r24, r6, r20); // ldumax x24, x6, [x20] 1198 1199 // LSEOp 1200 __ swpa(Assembler::xword, zr, r14, r16); // swpa xzr, x14, [x16] 1201 __ ldadda(Assembler::xword, r6, r0, r7); // ldadda x6, x0, [x7] 1202 __ ldbica(Assembler::xword, r15, r19, r26); // ldclra x15, x19, [x26] 1203 __ ldeora(Assembler::xword, r9, r10, r23); // ldeora x9, x10, [x23] 1204 __ ldorra(Assembler::xword, r21, r22, r28); // ldseta x21, x22, [x28] 1205 __ ldsmina(Assembler::xword, r2, r3, r15); // ldsmina x2, x3, [x15] 1206 __ ldsmaxa(Assembler::xword, r19, r20, r7); // ldsmaxa x19, x20, [x7] 1207 __ ldumina(Assembler::xword, r4, r29, r7); // ldumina x4, x29, [x7] 1208 __ ldumaxa(Assembler::xword, r0, r9, r16); // ldumaxa x0, x9, [x16] 1209 1210 // LSEOp 1211 __ swpal(Assembler::xword, r20, r23, r4); // swpal x20, x23, [x4] 1212 __ ldaddal(Assembler::xword, r16, r10, r23); // ldaddal x16, x10, [x23] 1213 __ ldbical(Assembler::xword, r11, r25, r6); // ldclral x11, x25, [x6] 1214 __ ldeoral(Assembler::xword, zr, r16, r13); // ldeoral xzr, x16, [x13] 1215 __ ldorral(Assembler::xword, r23, r12, r1); // ldsetal x23, x12, [x1] 1216 __ ldsminal(Assembler::xword, r14, r9, r21); // ldsminal x14, x9, [x21] 1217 __ ldsmaxal(Assembler::xword, r16, r26, r15); // ldsmaxal x16, x26, [x15] 1218 __ lduminal(Assembler::xword, r4, r4, r15); // lduminal x4, x4, [x15] 1219 __ ldumaxal(Assembler::xword, r8, r6, r30); // ldumaxal x8, x6, [x30] 1220 1221 // LSEOp 1222 __ swpl(Assembler::xword, r4, r29, r17); // swpl x4, x29, [x17] 1223 __ ldaddl(Assembler::xword, r29, r26, r9); // ldaddl x29, x26, [x9] 1224 __ 
ldbicl(Assembler::xword, r15, r2, r11); // ldclrl x15, x2, [x11] 1225 __ ldeorl(Assembler::xword, r29, r3, r7); // ldeorl x29, x3, [x7] 1226 __ ldorrl(Assembler::xword, r1, r27, r21); // ldsetl x1, x27, [x21] 1227 __ ldsminl(Assembler::xword, r16, r14, r8); // ldsminl x16, x14, [x8] 1228 __ ldsmaxl(Assembler::xword, r16, r22, r25); // ldsmaxl x16, x22, [x25] 1229 __ lduminl(Assembler::xword, r5, r20, r21); // lduminl x5, x20, [x21] 1230 __ ldumaxl(Assembler::xword, r16, r23, r16); // ldumaxl x16, x23, [x16] 1231 1232 // LSEOp 1233 __ swp(Assembler::word, r30, r20, r20); // swp w30, w20, [x20] 1234 __ ldadd(Assembler::word, r0, r4, r19); // ldadd w0, w4, [x19] 1235 __ ldbic(Assembler::word, r24, r4, r20); // ldclr w24, w4, [x20] 1236 __ ldeor(Assembler::word, r4, r24, r26); // ldeor w4, w24, [x26] 1237 __ ldorr(Assembler::word, r19, r2, r8); // ldset w19, w2, [x8] 1238 __ ldsmin(Assembler::word, r8, r14, r24); // ldsmin w8, w14, [x24] 1239 __ ldsmax(Assembler::word, r16, zr, r22); // ldsmax w16, wzr, [x22] 1240 __ ldumin(Assembler::word, r4, zr, r1); // ldumin w4, wzr, [x1] 1241 __ ldumax(Assembler::word, r10, r20, r12); // ldumax w10, w20, [x12] 1242 1243 // LSEOp 1244 __ swpa(Assembler::word, r0, r9, r7); // swpa w0, w9, [x7] 1245 __ ldadda(Assembler::word, r24, r16, r4); // ldadda w24, w16, [x4] 1246 __ ldbica(Assembler::word, r27, r6, r10); // ldclra w27, w6, [x10] 1247 __ ldeora(Assembler::word, r27, r24, r13); // ldeora w27, w24, [x13] 1248 __ ldorra(Assembler::word, r16, zr, r22); // ldseta w16, wzr, [x22] 1249 __ ldsmina(Assembler::word, r22, r20, sp); // ldsmina w22, w20, [sp] 1250 __ ldsmaxa(Assembler::word, r29, r9, r14); // ldsmaxa w29, w9, [x14] 1251 __ ldumina(Assembler::word, r20, r7, r20); // ldumina w20, w7, [x20] 1252 __ ldumaxa(Assembler::word, r28, r9, r11); // ldumaxa w28, w9, [x11] 1253 1254 // LSEOp 1255 __ swpal(Assembler::word, r14, r12, r20); // swpal w14, w12, [x20] 1256 __ ldaddal(Assembler::word, r1, r24, r9); // ldaddal w1, w24, [x9] 
1257 __ ldbical(Assembler::word, r19, r13, r19); // ldclral w19, w13, [x19] 1258 __ ldeoral(Assembler::word, r16, r16, r5); // ldeoral w16, w16, [x5] 1259 __ ldorral(Assembler::word, r0, r3, r12); // ldsetal w0, w3, [x12] 1260 __ ldsminal(Assembler::word, r8, r15, r15); // ldsminal w8, w15, [x15] 1261 __ ldsmaxal(Assembler::word, r16, r4, r15); // ldsmaxal w16, w4, [x15] 1262 __ lduminal(Assembler::word, r30, r5, r0); // lduminal w30, w5, [x0] 1263 __ ldumaxal(Assembler::word, r10, r22, r27); // ldumaxal w10, w22, [x27] 1264 1265 // LSEOp 1266 __ swpl(Assembler::word, r3, r0, r9); // swpl w3, w0, [x9] 1267 __ ldaddl(Assembler::word, r19, r29, r10); // ldaddl w19, w29, [x10] 1268 __ ldbicl(Assembler::word, r24, r4, r20); // ldclrl w24, w4, [x20] 1269 __ ldeorl(Assembler::word, r7, r24, r29); // ldeorl w7, w24, [x29] 1270 __ ldorrl(Assembler::word, r14, r21, r11); // ldsetl w14, w21, [x11] 1271 __ ldsminl(Assembler::word, r27, r13, r15); // ldsminl w27, w13, [x15] 1272 __ ldsmaxl(Assembler::word, zr, r17, r14); // ldsmaxl wzr, w17, [x14] 1273 __ lduminl(Assembler::word, r3, r30, r16); // lduminl w3, w30, [x16] 1274 __ ldumaxl(Assembler::word, r22, r20, r7); // ldumaxl w22, w20, [x7] 1275 1276 // SHA3SIMDOp 1277 __ bcax(v20, __ T16B, v3, v1, v26); // bcax v20.16B, v3.16B, v1.16B, v26.16B 1278 __ eor3(v19, __ T16B, v9, v16, v17); // eor3 v19.16B, v9.16B, v16.16B, v17.16B 1279 __ rax1(v21, __ T2D, v0, v4); // rax1 v21.2D, v0.2D, v4.2D 1280 __ xar(v2, __ T2D, v24, v14, 12); // xar v2.2D, v24.2D, v14.2D, #12 1281 1282 // SHA512SIMDOp 1283 __ sha512h(v11, __ T2D, v21, v14); // sha512h q11, q21, v14.2D 1284 __ sha512h2(v17, __ T2D, v30, v12); // sha512h2 q17, q30, v12.2D 1285 __ sha512su0(v3, __ T2D, v3); // sha512su0 v3.2D, v3.2D 1286 __ sha512su1(v23, __ T2D, v9, v3); // sha512su1 v23.2D, v9.2D, v3.2D 1287 1288 // SVEBinaryImmOp 1289 __ sve_add(z24, __ D, 26u); // add z24.d, z24.d, #0x1a 1290 __ sve_sub(z19, __ S, 62u); // sub z19.s, z19.s, #0x3e 1291 __ sve_and(z26, __ 
S, 917504u); // and z26.s, z26.s, #0xe0000 1292 __ sve_eor(z8, __ D, 18442240474082197503u); // eor z8.d, z8.d, #0xfff0000000003fff 1293 __ sve_orr(z18, __ S, 253952u); // orr z18.s, z18.s, #0x3e000 1294 1295 // SVEBinaryImmOp 1296 __ sve_add(z9, __ S, 97u); // add z9.s, z9.s, #0x61 1297 __ sve_sub(z8, __ H, 118u); // sub z8.h, z8.h, #0x76 1298 __ sve_and(z19, __ S, 1056980736u); // and z19.s, z19.s, #0x3f003f00 1299 __ sve_eor(z25, __ S, 3758350339u); // eor z25.s, z25.s, #0xe003e003 1300 __ sve_orr(z9, __ S, 4294459391u); // orr z9.s, z9.s, #0xfff83fff 1301 1302 // SVEBinaryImmOp 1303 __ sve_add(z23, __ D, 183u); // add z23.d, z23.d, #0xb7 1304 __ sve_sub(z8, __ H, 41u); // sub z8.h, z8.h, #0x29 1305 __ sve_and(z28, __ D, 8064u); // and z28.d, z28.d, #0x1f80 1306 __ sve_eor(z15, __ D, 18428729675200069887u); // eor z15.d, z15.d, #0xffc00000000000ff 1307 __ sve_orr(z0, __ B, 239u); // orr z0.b, z0.b, #0xef 1308 1309 // SVEBinaryImmOp 1310 __ sve_add(z5, __ D, 243u); // add z5.d, z5.d, #0xf3 1311 __ sve_sub(z19, __ S, 8u); // sub z19.s, z19.s, #0x8 1312 __ sve_and(z13, __ H, 32256u); // and z13.h, z13.h, #0x7e00 1313 __ sve_eor(z0, __ S, 4294967293u); // eor z0.s, z0.s, #0xfffffffd 1314 __ sve_orr(z21, __ S, 4294965263u); // orr z21.s, z21.s, #0xfffff80f 1315 1316 // SVEBinaryImmOp 1317 __ sve_add(z12, __ H, 20u); // add z12.h, z12.h, #0x14 1318 __ sve_sub(z0, __ H, 190u); // sub z0.h, z0.h, #0xbe 1319 __ sve_and(z23, __ B, 239u); // and z23.b, z23.b, #0xef 1320 __ sve_eor(z27, __ D, 18442240474082197503u); // eor z27.d, z27.d, #0xfff0000000003fff 1321 __ sve_orr(z22, __ B, 124u); // orr z22.b, z22.b, #0x7c 1322 1323 // SVEBinaryImmOp 1324 __ sve_add(z20, __ H, 165u); // add z20.h, z20.h, #0xa5 1325 __ sve_sub(z24, __ D, 72u); // sub z24.d, z24.d, #0x48 1326 __ sve_and(z31, __ S, 4026535935u); // and z31.s, z31.s, #0xf0000fff 1327 __ sve_eor(z21, __ B, 128u); // eor z21.b, z21.b, #0x80 1328 __ sve_orr(z30, __ S, 4294967293u); // orr z30.s, z30.s, #0xfffffffd 1329 
1330 // SVEVectorOp 1331 __ sve_add(z26, __ H, z18, z19); // add z26.h, z18.h, z19.h 1332 __ sve_sub(z11, __ S, z13, z29); // sub z11.s, z13.s, z29.s 1333 __ sve_fadd(z5, __ S, z1, z14); // fadd z5.s, z1.s, z14.s 1334 __ sve_fmul(z2, __ S, z7, z10); // fmul z2.s, z7.s, z10.s 1335 __ sve_fsub(z19, __ S, z4, z26); // fsub z19.s, z4.s, z26.s 1336 __ sve_sqadd(z2, __ B, z3, z30); // sqadd z2.b, z3.b, z30.b 1337 __ sve_sqsub(z20, __ D, z5, z20); // sqsub z20.d, z5.d, z20.d 1338 __ sve_uqadd(z29, __ H, z13, z13); // uqadd z29.h, z13.h, z13.h 1339 __ sve_uqsub(z14, __ H, z30, z1); // uqsub z14.h, z30.h, z1.h 1340 __ sve_abs(z28, __ D, p0, z3); // abs z28.d, p0/m, z3.d 1341 __ sve_add(z9, __ B, p6, z9); // add z9.b, p6/m, z9.b, z9.b 1342 __ sve_and(z26, __ B, p2, z14); // and z26.b, p2/m, z26.b, z14.b 1343 __ sve_asr(z20, __ D, p6, z7); // asr z20.d, p6/m, z20.d, z7.d 1344 __ sve_bic(z20, __ D, p4, z6); // bic z20.d, p4/m, z20.d, z6.d 1345 __ sve_clz(z13, __ H, p0, z29); // clz z13.h, p0/m, z29.h 1346 __ sve_cnt(z9, __ B, p0, z1); // cnt z9.b, p0/m, z1.b 1347 __ sve_eor(z27, __ B, p6, z15); // eor z27.b, p6/m, z27.b, z15.b 1348 __ sve_lsl(z4, __ D, p7, z17); // lsl z4.d, p7/m, z4.d, z17.d 1349 __ sve_lsr(z2, __ B, p0, z24); // lsr z2.b, p0/m, z2.b, z24.b 1350 __ sve_mul(z26, __ B, p7, z13); // mul z26.b, p7/m, z26.b, z13.b 1351 __ sve_neg(z22, __ D, p3, z16); // neg z22.d, p3/m, z16.d 1352 __ sve_not(z17, __ D, p1, z11); // not z17.d, p1/m, z11.d 1353 __ sve_orr(z16, __ B, p0, z16); // orr z16.b, p0/m, z16.b, z16.b 1354 __ sve_rbit(z28, __ D, p1, z23); // rbit z28.d, p1/m, z23.d 1355 __ sve_revb(z28, __ D, p4, z10); // revb z28.d, p4/m, z10.d 1356 __ sve_smax(z17, __ S, p7, z7); // smax z17.s, p7/m, z17.s, z7.s 1357 __ sve_smin(z4, __ H, p3, z24); // smin z4.h, p3/m, z4.h, z24.h 1358 __ sve_umax(z9, __ B, p2, z11); // umax z9.b, p2/m, z9.b, z11.b 1359 __ sve_umin(z4, __ S, p5, z22); // umin z4.s, p5/m, z4.s, z22.s 1360 __ sve_sub(z4, __ H, p0, z15); // sub z4.h, p0/m, 
z4.h, z15.h 1361 __ sve_fabs(z4, __ D, p7, z26); // fabs z4.d, p7/m, z26.d 1362 __ sve_fadd(z5, __ S, p5, z26); // fadd z5.s, p5/m, z5.s, z26.s 1363 __ sve_fdiv(z31, __ S, p0, z25); // fdiv z31.s, p0/m, z31.s, z25.s 1364 __ sve_fmax(z8, __ D, p1, z3); // fmax z8.d, p1/m, z8.d, z3.d 1365 __ sve_fmin(z7, __ D, p6, z24); // fmin z7.d, p6/m, z7.d, z24.d 1366 __ sve_fmul(z24, __ S, p7, z17); // fmul z24.s, p7/m, z24.s, z17.s 1367 __ sve_fneg(z10, __ S, p3, z30); // fneg z10.s, p3/m, z30.s 1368 __ sve_frintm(z8, __ S, p6, z29); // frintm z8.s, p6/m, z29.s 1369 __ sve_frintn(z31, __ D, p5, z31); // frintn z31.d, p5/m, z31.d 1370 __ sve_frintp(z0, __ D, p5, z7); // frintp z0.d, p5/m, z7.d 1371 __ sve_fsqrt(z29, __ S, p6, z22); // fsqrt z29.s, p6/m, z22.s 1372 __ sve_fsub(z29, __ S, p6, z20); // fsub z29.s, p6/m, z29.s, z20.s 1373 __ sve_fmad(z6, __ D, p4, z18, z13); // fmad z6.d, p4/m, z18.d, z13.d 1374 __ sve_fmla(z21, __ S, p2, z0, z19); // fmla z21.s, p2/m, z0.s, z19.s 1375 __ sve_fmls(z28, __ D, p1, z17, z6); // fmls z28.d, p1/m, z17.d, z6.d 1376 __ sve_fmsb(z20, __ D, p6, z28, z14); // fmsb z20.d, p6/m, z28.d, z14.d 1377 __ sve_fnmad(z14, __ S, p4, z10, z26); // fnmad z14.s, p4/m, z10.s, z26.s 1378 __ sve_fnmsb(z24, __ D, p0, z11, z15); // fnmsb z24.d, p0/m, z11.d, z15.d 1379 __ sve_fnmla(z23, __ D, p5, z20, z28); // fnmla z23.d, p5/m, z20.d, z28.d 1380 __ sve_fnmls(z20, __ D, p7, z24, z0); // fnmls z20.d, p7/m, z24.d, z0.d 1381 __ sve_mla(z6, __ B, p5, z13, z12); // mla z6.b, p5/m, z13.b, z12.b 1382 __ sve_mls(z13, __ S, p7, z26, z23); // mls z13.s, p7/m, z26.s, z23.s 1383 __ sve_and(z6, z2, z29); // and z6.d, z2.d, z29.d 1384 __ sve_eor(z0, z29, z23); // eor z0.d, z29.d, z23.d 1385 __ sve_orr(z4, z5, z8); // orr z4.d, z5.d, z8.d 1386 __ sve_bic(z13, z17, z13); // bic z13.d, z17.d, z13.d 1387 __ sve_uzp1(z8, __ H, z10, z8); // uzp1 z8.h, z10.h, z8.h 1388 __ sve_uzp2(z19, __ S, z0, z29); // uzp2 z19.s, z0.s, z29.s 1389 __ sve_fabd(z16, __ D, p3, z23); // fabd z16.d, 
p3/m, z16.d, z23.d 1390 __ sve_bext(z23, __ B, z30, z13); // bext z23.b, z30.b, z13.b 1391 __ sve_bdep(z25, __ H, z22, z0); // bdep z25.h, z22.h, z0.h 1392 __ sve_eor3(z25, z30, z11); // eor3 z25.d, z25.d, z30.d, z11.d 1393 __ sve_sqadd(z14, __ H, p5, z22); // sqadd z14.h, p5/m, z14.h, z22.h 1394 __ sve_sqsub(z5, __ H, p4, z0); // sqsub z5.h, p4/m, z5.h, z0.h 1395 __ sve_uqadd(z9, __ D, p0, z3); // uqadd z9.d, p0/m, z9.d, z3.d 1396 __ sve_uqsub(z14, __ H, p1, z29); // uqsub z14.h, p1/m, z14.h, z29.h 1397 1398 // SVEReductionOp 1399 __ sve_andv(v14, __ D, p5, z4); // andv d14, p5, z4.d 1400 __ sve_orv(v27, __ S, p3, z22); // orv s27, p3, z22.s 1401 __ sve_eorv(v31, __ S, p6, z11); // eorv s31, p6, z11.s 1402 __ sve_smaxv(v12, __ B, p4, z28); // smaxv b12, p4, z28.b 1403 __ sve_sminv(v28, __ D, p4, z4); // sminv d28, p4, z4.d 1404 __ sve_fminv(v6, __ D, p0, z15); // fminv d6, p0, z15.d 1405 __ sve_fmaxv(v1, __ D, p5, z18); // fmaxv d1, p5, z18.d 1406 __ sve_fadda(v2, __ S, p2, z4); // fadda s2, p2, s2, z4.s 1407 __ sve_uaddv(v11, __ S, p2, z28); // uaddv d11, p2, z28.s 1408 1409 // AddWideNEONOp 1410 __ saddwv(v3, v4, __ T8H, v5, __ T8B); // saddw v3.8H, v4.8H, v5.8B 1411 __ saddwv2(v21, v22, __ T8H, v23, __ T16B); // saddw2 v21.8H, v22.8H, v23.16B 1412 __ saddwv(v31, v0, __ T4S, v1, __ T4H); // saddw v31.4S, v0.4S, v1.4H 1413 __ saddwv2(v11, v12, __ T4S, v13, __ T8H); // saddw2 v11.4S, v12.4S, v13.8H 1414 __ saddwv(v24, v25, __ T2D, v26, __ T2S); // saddw v24.2D, v25.2D, v26.2S 1415 __ saddwv2(v21, v22, __ T2D, v23, __ T4S); // saddw2 v21.2D, v22.2D, v23.4S 1416 __ uaddwv(v15, v16, __ T8H, v17, __ T8B); // uaddw v15.8H, v16.8H, v17.8B 1417 __ uaddwv2(v12, v13, __ T8H, v14, __ T16B); // uaddw2 v12.8H, v13.8H, v14.16B 1418 __ uaddwv(v6, v7, __ T4S, v8, __ T4H); // uaddw v6.4S, v7.4S, v8.4H 1419 __ uaddwv2(v13, v14, __ T4S, v15, __ T8H); // uaddw2 v13.4S, v14.4S, v15.8H 1420 __ uaddwv(v8, v9, __ T2D, v10, __ T2S); // uaddw v8.2D, v9.2D, v10.2S 1421 __ uaddwv2(v15, v16, 
__ T2D, v17, __ T4S); // uaddw2 v15.2D, v16.2D, v17.4S 1422 1423 __ bind(forth); 1424 1425 /* 1426 */ 1427 1428 static const unsigned int insns[] = 1429 { 1430 0x8b0d82fa, 0xcb49970c, 0xab889dfc, 0xeb9ee787, 1431 0x0b9b3ec9, 0x4b9179a3, 0x2b88474e, 0x6b8c56c0, 1432 0x8a1a51e0, 0xaa11f4ba, 0xca0281b8, 0xea918c7c, 1433 0x0a5d4a19, 0x2a4b262d, 0x4a513ca5, 0x6a9b6ae2, 1434 0x8a70b79b, 0xaaba9728, 0xca6dfe3d, 0xea627f1c, 1435 0x0aa70f53, 0x2aaa0f06, 0x4a6176a4, 0x6a604eb0, 1436 0x1105ed91, 0x3100583e, 0x5101f8bd, 0x710f0306, 1437 0x9101a1a0, 0xb10a5cc8, 0xd10810aa, 0xf10fd061, 1438 0x120cb166, 0x321764bc, 0x52174681, 0x720c0227, 1439 0x9241018e, 0xb25a2969, 0xd278b411, 0xf26aad01, 1440 0x14000000, 0x17ffffd7, 0x140004af, 0x94000000, 1441 0x97ffffd4, 0x940004ac, 0x3400000a, 0x34fffa2a, 1442 0x3400952a, 0x35000008, 0x35fff9c8, 0x350094c8, 1443 0xb400000b, 0xb4fff96b, 0xb400946b, 0xb500001d, 1444 0xb5fff91d, 0xb500941d, 0x10000013, 0x10fff8b3, 1445 0x100093b3, 0x90000013, 0x36300016, 0x3637f836, 1446 0x36309336, 0x3758000c, 0x375ff7cc, 0x375892cc, 1447 0x128313a0, 0x528a32c7, 0x7289173b, 0x92ab3acc, 1448 0xd2a0bf94, 0xf2c285e8, 0x9358722f, 0x330e652f, 1449 0x53067f3b, 0x93577c53, 0xb34a1aac, 0xd35a4016, 1450 0x13946c63, 0x93c3dbc8, 0x54000000, 0x54fff5a0, 1451 0x540090a0, 0x54000001, 0x54fff541, 0x54009041, 1452 0x54000002, 0x54fff4e2, 0x54008fe2, 0x54000002, 1453 0x54fff482, 0x54008f82, 0x54000003, 0x54fff423, 1454 0x54008f23, 0x54000003, 0x54fff3c3, 0x54008ec3, 1455 0x54000004, 0x54fff364, 0x54008e64, 0x54000005, 1456 0x54fff305, 0x54008e05, 0x54000006, 0x54fff2a6, 1457 0x54008da6, 0x54000007, 0x54fff247, 0x54008d47, 1458 0x54000008, 0x54fff1e8, 0x54008ce8, 0x54000009, 1459 0x54fff189, 0x54008c89, 0x5400000a, 0x54fff12a, 1460 0x54008c2a, 0x5400000b, 0x54fff0cb, 0x54008bcb, 1461 0x5400000c, 0x54fff06c, 0x54008b6c, 0x5400000d, 1462 0x54fff00d, 0x54008b0d, 0x5400000e, 0x54ffefae, 1463 0x54008aae, 0x5400000f, 0x54ffef4f, 0x54008a4f, 1464 0xd40658e1, 0xd4014d22, 0xd4046543, 
0xd4273f60, 1465 0xd44cad80, 0xd503201f, 0xd503203f, 0xd503205f, 1466 0xd503209f, 0xd50320bf, 0xd503219f, 0xd50323bf, 1467 0xd503239f, 0xd50321df, 0xd50323ff, 0xd50323df, 1468 0xd503211f, 0xd503233f, 0xd503231f, 0xd503215f, 1469 0xd503237f, 0xd503235f, 0xd69f03e0, 0xd6bf03e0, 1470 0xd5033fdf, 0xd503207f, 0xd50320ff, 0xd5033e9f, 1471 0xd50332bf, 0xd61f0200, 0xd63f0280, 0xdac123ea, 1472 0xdac127fb, 0xdac12be8, 0xdac12fe0, 0xdac133e1, 1473 0xdac137f5, 0xdac13bf1, 0xdac13ffd, 0xdac147fd, 1474 0xd61f0b9f, 0xd61f0c3f, 0xd63f0aff, 0xd63f0ebf, 1475 0xd51b4434, 0xd51b4216, 0xd53b443b, 0xd53b4213, 1476 0xd53b00eb, 0xd53b0030, 0xdac143e6, 0xc8117c80, 1477 0xc80afed8, 0xc85f7e6a, 0xc85ffca1, 0xc89ffd1e, 1478 0xc8dffe2c, 0x88097cee, 0x8801fe05, 0x885f7d82, 1479 0x885ffd8a, 0x889fff83, 0x88dfff4e, 0x481e7dca, 1480 0x4815fd2d, 0x485f7f76, 0x485ffe7c, 0x489fffcb, 1481 0x48dffc53, 0x08027c37, 0x0800fe0c, 0x085f7ded, 1482 0x085ffeb1, 0x089ffd6d, 0x08dffd1e, 0xc87f3578, 1483 0xc87feaa1, 0xc83b506d, 0xc82c87a6, 0x887f1166, 1484 0x887f93d0, 0x883e32a4, 0x883bf12f, 0xf80011f9, 1485 0xb81b1022, 0x381ea354, 0x79002fd7, 0xf85cf39a, 1486 0xb8580309, 0x385e218c, 0x784051e1, 0x389e11d8, 1487 0x789fa1f8, 0x79c01865, 0xb881131b, 0xfc5dd3ad, 1488 0xbc5d1137, 0xfc00900b, 0xbc181015, 0xf818ec7d, 1489 0xb81b8c91, 0x381efc40, 0x78007c3d, 0xf857beb0, 1490 0xb8413dd4, 0x385fddd6, 0x78409e2f, 0x389eddea, 1491 0x789e7d94, 0x78de3d55, 0xb8805c13, 0xfc5cadc0, 1492 0xbc428c23, 0xfc1a2dc4, 0xbc1caf92, 0xf81475f6, 1493 0xb81f95d1, 0x381e757e, 0x78014561, 0xf8402436, 1494 0xb85896e2, 0x385f4763, 0x785db4f0, 0x3880374f, 1495 0x789e25e7, 0x78dd0563, 0xb88166f9, 0xfc529540, 1496 0xbc4374d3, 0xfc1166ae, 0xbc1ba6c0, 0xf820ea7b, 1497 0xb82d68c8, 0x38367a04, 0x782f4b59, 0xf878c8a4, 1498 0xb8674a24, 0x386b78f1, 0x78776bc0, 0x38a15aca, 1499 0x78bedbd5, 0x78fcd94b, 0xb8aa4a7c, 0xfc6ecbbe, 1500 0xbc65d8a8, 0xfc2de919, 0xbc3a7b11, 0xf91f1193, 1501 0xb91ed5f7, 0x391ec9bd, 0x79182ceb, 0xf95d4b0a, 1502 0xb9581010, 
0x395fc034, 0x795fb221, 0x399d8731, 1503 0x799efb3b, 0x79dd1a2e, 0xb998e4ea, 0xfd583723, 1504 0xbd5ea12c, 0xfd18dc38, 0xbd1b0e83, 0x58ffdaa2, 1505 0x1800001d, 0xf885d1c0, 0xd8ffda40, 0xf8a77820, 1506 0xf9980220, 0x1a030301, 0x3a140311, 0x5a0d000b, 1507 0x7a07015c, 0x9a1001e4, 0xba140182, 0xda0d01bd, 1508 0xfa0c00ce, 0x0b31f194, 0x2b206d7b, 0xcb29f027, 1509 0x6b210f63, 0x8b2cb34d, 0xab2a88b1, 0xcb2f511e, 1510 0xeb3332f3, 0x3a4533aa, 0x7a4d312b, 0xba442146, 1511 0xfa42818c, 0x3a466a02, 0x7a4b68ed, 0xba4a9b6b, 1512 0xfa4dd86d, 0x1a8a637a, 0x1a9cd6aa, 0x5a9bd137, 1513 0x5a8fd7aa, 0x9a95233e, 0x9a95c620, 0xda9422b0, 1514 0xda8397d3, 0x5ac00173, 0x5ac00418, 0x5ac00b3b, 1515 0x5ac0106e, 0x5ac0162e, 0xdac001e7, 0xdac00798, 1516 0xdac00b31, 0xdac00f42, 0xdac010bc, 0xdac01759, 1517 0xdac1021b, 0xdac104d1, 0xdac10995, 0xdac10c80, 1518 0xdac1136c, 0xdac11791, 0xdac1185c, 0xdac11d51, 1519 0xd71f09ee, 0xd71f0dc3, 0xd73f0b2f, 0xd73f0e6e, 1520 0x1ac40a05, 0x1ac40f3a, 0x1acc2042, 0x1ac8263d, 1521 0x1ac42867, 0x1ada2c99, 0x9ad10899, 0x9ad10f40, 1522 0x9ad521f7, 0x9adb263c, 0x9ac0286a, 0x9ac92f27, 1523 0x9bdd7de6, 0x9b427d4f, 0x1b0b2cf1, 0x1b1ddcf7, 1524 0x9b0b2f6e, 0x9b0cbf04, 0x9b2b728e, 0x9b2cdd6d, 1525 0x9bae275e, 0x9ba7954d, 0x7ec315fe, 0x1ef0098c, 1526 0x1ef21bff, 0x1ef02ab3, 0x1ef5394f, 0x1efc4942, 1527 0x1eff5bc7, 0x1ee28832, 0x7ea3d546, 0x1e270979, 1528 0x1e201981, 0x1e3d2a63, 0x1e263ae6, 0x1e3b4b80, 1529 0x1e2758a2, 0x1e39899d, 0x7ef8d58d, 0x1e720913, 1530 0x1e751b56, 0x1e622a74, 0x1e683ade, 0x1e754a76, 1531 0x1e755a4c, 0x1e638a06, 0x1fc373a3, 0x1f0a35cf, 1532 0x1f0aea4c, 0x1f2f74e7, 0x1f2032e0, 0x1f4d21d8, 1533 0x1f49d0ef, 0x1f7f43b3, 0x1f705522, 0x1e20409e, 1534 0x1e20c361, 0x1e214319, 0x1e21c2ae, 0x1e22c0cd, 1535 0x1e23c32c, 0x1ee243d9, 0x1e6042bc, 0x1e60c2f0, 1536 0x1e6143a5, 0x1e61c276, 0x1e62428d, 0x1ee1c393, 1537 0x1e3800d1, 0x9e3800ed, 0x1e78035c, 0x9e7800d1, 1538 0x1e220081, 0x9e22028e, 0x1e6202a7, 0x9e6202fb, 1539 0x1e24028d, 0x9e64039e, 0x1e3002aa, 0x9e700225, 
1540 0x1e2601cb, 0x9e6602ad, 0x1e2701db, 0x9e6702e4, 1541 0x1e3e2300, 0x1e6e2180, 0x1e202228, 0x1e602388, 1542 0x29021b40, 0x297c78c0, 0x69660970, 0xa908018f, 1543 0xa9427ae7, 0x29a03cfa, 0x29fc3d4b, 0x69c84033, 1544 0xa988240e, 0xa9fa0d9b, 0x28a02d88, 0x28c8408a, 1545 0x68f87a6a, 0xa8ba09f8, 0xa8c52a18, 0x280257be, 1546 0x28727948, 0xa83868de, 0xa8440a98, 0x0c40733f, 1547 0x4cdfa1e5, 0x0ccd6cea, 0x4cdf260d, 0x0d40c227, 1548 0x4ddfcb30, 0x0dc7cc6b, 0x4c408ced, 0x0cdf8769, 1549 0x4d60c346, 0x0dffca17, 0x4de8cda6, 0x4cda4834, 1550 0x0c4049ef, 0x4d40e6dd, 0x4ddfe946, 0x0dcfeccf, 1551 0x4cdf0546, 0x0cc7006b, 0x0d60e32c, 0x0dffe5eb, 1552 0x0dfce8de, 0x0e31bb9b, 0x4e31bbbc, 0x0e71b841, 1553 0x4e71bbbc, 0x4eb1b841, 0x0e30aab4, 0x4e30abdd, 1554 0x0e70aa30, 0x4e70a9cd, 0x4eb0a96a, 0x6e30fbdd, 1555 0x0e31abdd, 0x2e31aa93, 0x4e31aaf6, 0x6e31a96a, 1556 0x0e71a8a4, 0x2e71a81f, 0x4e71aad5, 0x6e71a928, 1557 0x4eb1a81f, 0x6eb1aa93, 0x6eb0f96a, 0x7e30fbbc, 1558 0x7e70f862, 0x7eb0fb59, 0x7ef0f8c5, 0x0ea0c883, 1559 0x4ea0c928, 0x4ee0caf6, 0x2ea0ca93, 0x6ea0c9cd, 1560 0x6ee0c8c5, 0x0ea0dbdd, 0x4ea0db38, 0x4ee0dad5, 1561 0x0ea0eb7a, 0x4ea0eb38, 0x4ee0e883, 0x2ea0db38, 1562 0x6ea0db7a, 0x6ee0db17, 0x0e20ba0f, 0x4e20bad5, 1563 0x0e60b883, 0x4e60bb38, 0x0ea0b928, 0x4ea0bb59, 1564 0x4ee0bab4, 0x0ea0fa30, 0x4ea0fa51, 0x4ee0f862, 1565 0x0ef8f841, 0x4ef8f820, 0x2ea0fb38, 0x6ea0f8a4, 1566 0x6ee0f883, 0x2ef8f9ac, 0x6ef8f81f, 0x2ea1fbbc, 1567 0x6ea1f96a, 0x6ee1fb7a, 0x2ef9f862, 0x6ef9f9ac, 1568 0x2e205a72, 0x6e20581f, 0x0e231c41, 0x4e2f1dcd, 1569 0x0ebf1fdd, 0x4ea21c20, 0x2e351e93, 0x6e2e1dac, 1570 0x0e338651, 0x4e3886f6, 0x0e6f85cd, 0x4e7e87bc, 1571 0x0ea087fe, 0x4ea1841f, 0x4ee38441, 0x0e3c0f7a, 1572 0x4e3e0fbc, 0x0e660ca4, 0x4e600ffe, 0x0ea60ca4, 1573 0x4ea80ce6, 0x4ee00ffe, 0x2e3c0f7a, 0x6e340e72, 1574 0x2e6b0d49, 0x6e6a0d28, 0x2eae0dac, 0x6ea20c20, 1575 0x6ef60eb4, 0x0e23d441, 0x4e3ad738, 0x4e64d462, 1576 0x0e421420, 0x4e4b1549, 0x2e3a8738, 0x6e3c877a, 1577 0x2e728630, 0x6e6087fe, 
0x2ea58483, 0x6eac856a, 1578 0x6ef98717, 0x0e2c2d6a, 0x4e262ca4, 0x0e742e72, 1579 0x4e642c62, 0x0ead2d8b, 0x4eaa2d28, 0x4eec2d6a, 1580 0x2e312e0f, 0x6e332e51, 0x2e642c62, 0x6e6c2d6a, 1581 0x2eae2dac, 0x6eae2dac, 0x6ef12e0f, 0x0eafd5cd, 1582 0x4ea4d462, 0x4ee9d507, 0x0ed616b4, 0x4edc177a, 1583 0x0e329e30, 0x4e269ca4, 0x0e649c62, 0x4e669ca4, 1584 0x0eae9dac, 0x4eb49e72, 0x2eb7d6d5, 0x6eb2d630, 1585 0x6ef4d672, 0x2ecd158b, 0x6ed716d5, 0x2e39d717, 1586 0x6e2ed5ac, 0x6e7cd77a, 0x2e591717, 0x6e5e17bc, 1587 0x2e30ddee, 0x6e2ddd8b, 0x6e7adf38, 0x2e431c41, 1588 0x6e4e1dac, 0x0e61941f, 0x4e6c956a, 0x0eb29630, 1589 0x4ea99507, 0x0e24cc62, 0x4e25cc83, 0x4e6fcdcd, 1590 0x0e550e93, 0x4e530e51, 0x2e729630, 0x6e659483, 1591 0x2ea39441, 0x6ead958b, 0x0ea0cffe, 0x4ea7ccc5, 1592 0x4eeacd28, 0x0ed10e0f, 0x4edf0fdd, 0x2e20fffe, 1593 0x6e22fc20, 0x6e76feb4, 0x2e493d07, 0x6e563eb4, 1594 0x0e396717, 0x4e3e67bc, 0x0e7766d5, 0x4e7d679b, 1595 0x0ebb6759, 0x4ea764c5, 0x2e236441, 0x6e396717, 1596 0x2e726630, 0x6e61641f, 0x2ea764c5, 0x6eae65ac, 1597 0x0e2ba549, 0x4e3ea7bc, 0x0e71a60f, 0x4e7fa7dd, 1598 0x0eb8a6f6, 0x4ea1a41f, 0x0e35f693, 0x4e21f41f, 1599 0x4e67f4c5, 0x0e5035ee, 0x4e543672, 0x0e216c1f, 1600 0x4e346e72, 0x0e7d6f9b, 0x4e766eb4, 0x0eb26e30, 1601 0x4eae6dac, 0x2e2d6d8b, 0x6e2b6d49, 0x2e686ce6, 1602 0x6e606ffe, 0x2eb36e51, 0x6ebd6f9b, 0x0e3eafbc, 1603 0x4e20affe, 0x0e69ad07, 0x4e6cad6a, 0x0eb6aeb4, 1604 0x4eacad6a, 0x0e66b4a4, 0x4e7ab738, 0x0eb3b651, 1605 0x4eb3b651, 0x0e3826f6, 0x4e252483, 0x0e7f27dd, 1606 0x4e71260f, 0x0eb826f6, 0x4eb52693, 0x0eb5f693, 1607 0x4eb8f6f6, 0x4ee4f462, 0x0ed1360f, 0x4ec834e6, 1608 0x2eaeedac, 0x6eb2ee30, 0x6eeded8b, 0x2ecf2dcd, 1609 0x6ed92f17, 0x0f81100f, 0x4f848862, 0x4fc31841, 1610 0x0fad518b, 0x4fa780c5, 0x4fd059ee, 0x2fa890e6, 1611 0x4fa38841, 0x6fc1900f, 0x0f7b8149, 0x4f4688a4, 1612 0x0faf81cd, 0x4fa58083, 0x0e3736d5, 0x4e393717, 1613 0x0e61341f, 0x4e7b3759, 0x0ea43462, 0x4ea1341f, 1614 0x4efd379b, 0x0e343e72, 0x4e2c3d6a, 0x0e793f17, 1615 
0x4e753e93, 0x0ea53c83, 0x4eb43e72, 0x4ee23c20, 1616 0x2e3b8f59, 0x6e3c8f7a, 0x2e798f17, 0x6e648c62, 1617 0x2eb48e72, 0x6eae8dac, 0x6ee68ca4, 0x2e3e37bc, 1618 0x6e2037fe, 0x2e7f37dd, 0x6e723630, 0x2ebd379b, 1619 0x6ea834e6, 0x6eeb3549, 0x2e3f3fdd, 0x6e343e72, 1620 0x2e693d07, 0x6e663ca4, 0x2ea93d07, 0x6eb13e0f, 1621 0x6eeb3d49, 0x0e39e717, 0x4e2ae528, 0x4e64e462, 1622 0x2ebee7bc, 0x6eb7e6d5, 0x6ee1e41f, 0x2e27e4c5, 1623 0x6e3de79b, 0x6e62e420, 0x659239e8, 0x65d03b94, 1624 0x65d0232d, 0x65d120c2, 0x659129f2, 0x65933ca3, 1625 0x25969683, 0x25961d15, 0x254d1c48, 0x259e3f61, 1626 0x25953b96, 0x255b91d1, 0x247686ed, 0x24309098, 1627 0x2462edb9, 0x24a57468, 0xba5fd3e3, 0x3a5f03e5, 1628 0xfa411be4, 0x7a42cbe2, 0x93df03ff, 0xc820ffff, 1629 0x8822fc7f, 0xc8247cbf, 0x88267fff, 0x4e010fe0, 1630 0x5e040420, 0x4e081fe1, 0x4e0c1fe1, 0x4e0a1fe1, 1631 0x4e071fe1, 0x4e042c20, 0x4e062c20, 0x4e052c20, 1632 0x4e083c20, 0x0e0c3c20, 0x0e0a3c20, 0x0e073c20, 1633 0x9eae0020, 0x0f03f409, 0x6f03f40e, 0x4cc0ac3f, 1634 0x0ea1b820, 0x0ef9b820, 0x4ef9b820, 0x4e21c862, 1635 0x0e79c862, 0x4e79c862, 0x4e61b8a4, 0x0e79b8a4, 1636 0x4e79b8a4, 0x05a08020, 0x05104fe0, 0x05505001, 1637 0x05906fe2, 0x05d03005, 0x05101fea, 0x05901feb, 1638 0x04b0e3e0, 0x0470e7e1, 0x042f9c20, 0x043f9c35, 1639 0x047f9c20, 0x04ff9c20, 0x04299420, 0x04319160, 1640 0x0461943e, 0x04a19020, 0x04038100, 0x040381a0, 1641 0x040387e1, 0x04438be2, 0x04c38fe3, 0x040181e0, 1642 0x04018100, 0x04018621, 0x04418b22, 0x04418822, 1643 0x04818c23, 0x040081e0, 0x04008120, 0x04008761, 1644 0x04008621, 0x04408822, 0x04808c23, 0x042053ff, 1645 0x047f5401, 0x25208028, 0x2538cfe0, 0x2578d001, 1646 0x25b8efe2, 0x25f8f007, 0x2538dfea, 0x25b8dfeb, 1647 0xa400a3e0, 0xa420a7e0, 0xa4484be0, 0xa467afe0, 1648 0xa4a8a7ea, 0xa547a814, 0xa4084ffe, 0xa55c53e0, 1649 0xa5e1540b, 0xe400fbf6, 0xe408ffff, 0xe420e7e0, 1650 0xe4484be0, 0xe460efe0, 0xe547e400, 0xe4014be0, 1651 0xe4a84fe0, 0xe5f15000, 0x858043e0, 0x85a043ff, 1652 0xe59f5d08, 0x0420e3e9, 0x0460e3ea, 
0x04a0e3eb, 1653 0x04e0e3ec, 0x25104042, 0x25104871, 0x25904861, 1654 0x25904c92, 0x05344020, 0x05744041, 0x05b44062, 1655 0x05f44083, 0x252c8840, 0x253c1420, 0x25681572, 1656 0x25a21ce3, 0x25ea1e34, 0x253c0421, 0x25680572, 1657 0x25a20ce3, 0x25ea0e34, 0x0522c020, 0x05e6c0a4, 1658 0x2401a001, 0x2443a051, 0x24858881, 0x24c78cd1, 1659 0x24850891, 0x24c70cc1, 0x250f9001, 0x25508051, 1660 0x25802491, 0x25df28c1, 0x25850c81, 0x251e10d1, 1661 0x65816001, 0x65c36051, 0x65854891, 0x65c74cc1, 1662 0x05733820, 0x05b238a4, 0x05f138e6, 0x0570396a, 1663 0x65d0a001, 0x65d6a443, 0x65d4a826, 0x6594ac26, 1664 0x6554ac26, 0x6556ac26, 0x6552ac26, 0x65cbac85, 1665 0x65caac01, 0x6589ac85, 0x6588ac01, 0x65c9ac85, 1666 0x65c8ac01, 0x65dea833, 0x659ca509, 0x65d8a801, 1667 0x65dcac01, 0x655cb241, 0x0520a1e0, 0x0521a601, 1668 0x052281e0, 0x05238601, 0x04a14026, 0x042244a6, 1669 0x046344a6, 0x04a444a6, 0x04e544a7, 0x0568aca7, 1670 0x05b23230, 0x853040af, 0xc5b040af, 0xe57080af, 1671 0xe5b080af, 0x25034440, 0x254054c4, 0x25034640, 1672 0x25415a05, 0x25834440, 0x25c54489, 0x250b5d3a, 1673 0x2550dc20, 0x2518e3e1, 0x2518e021, 0x2518e0a1, 1674 0x2518e121, 0x2518e1a1, 0x2558e3e2, 0x2558e042, 1675 0x2558e0c2, 0x2558e142, 0x2598e3e3, 0x2598e063, 1676 0x2598e0e3, 0x2598e163, 0x25d8e3e4, 0x25d8e084, 1677 0x25d8e104, 0x25d8e184, 0x2518e407, 0x05214800, 1678 0x05614800, 0x05a14800, 0x05e14800, 0x05214c00, 1679 0x05614c00, 0x05a14c00, 0x05e14c00, 0x05304001, 1680 0x05314001, 0x05a18610, 0x05e18610, 0x05271e11, 1681 0x6545e891, 0x6585e891, 0x65c5e891, 0x6545c891, 1682 0x6585c891, 0x65c5c891, 0x45b0c210, 0x45f1c231, 1683 0x1e601000, 0x1e603000, 0x1e621000, 0x1e623000, 1684 0x1e641000, 0x1e643000, 0x1e661000, 0x1e663000, 1685 0x1e681000, 0x1e683000, 0x1e6a1000, 0x1e6a3000, 1686 0x1e6c1000, 0x1e6c3000, 0x1e6e1000, 0x1e6e3000, 1687 0x1e701000, 0x1e703000, 0x1e721000, 0x1e723000, 1688 0x1e741000, 0x1e743000, 0x1e761000, 0x1e763000, 1689 0x1e781000, 0x1e783000, 0x1e7a1000, 0x1e7a3000, 1690 0x1e7c1000, 
0x1e7c3000, 0x1e7e1000, 0x1e7e3000, 1691 0xf8268267, 0xf82d023c, 0xf8301046, 0xf83d2083, 1692 0xf8263290, 0xf82d528c, 0xf8284299, 0xf8337160, 1693 0xf8386286, 0xf8bf820e, 0xf8a600e0, 0xf8af1353, 1694 0xf8a922ea, 0xf8b53396, 0xf8a251e3, 0xf8b340f4, 1695 0xf8a470fd, 0xf8a06209, 0xf8f48097, 0xf8f002ea, 1696 0xf8eb10d9, 0xf8ff21b0, 0xf8f7302c, 0xf8ee52a9, 1697 0xf8f041fa, 0xf8e471e4, 0xf8e863c6, 0xf864823d, 1698 0xf87d013a, 0xf86f1162, 0xf87d20e3, 0xf86132bb, 1699 0xf870510e, 0xf8704336, 0xf86572b4, 0xf8706217, 1700 0xb83e8294, 0xb8200264, 0xb8381284, 0xb8242358, 1701 0xb8333102, 0xb828530e, 0xb83042df, 0xb824703f, 1702 0xb82a6194, 0xb8a080e9, 0xb8b80090, 0xb8bb1146, 1703 0xb8bb21b8, 0xb8b032df, 0xb8b653f4, 0xb8bd41c9, 1704 0xb8b47287, 0xb8bc6169, 0xb8ee828c, 0xb8e10138, 1705 0xb8f3126d, 0xb8f020b0, 0xb8e03183, 0xb8e851ef, 1706 0xb8f041e4, 0xb8fe7005, 0xb8ea6376, 0xb8638120, 1707 0xb873015d, 0xb8781284, 0xb86723b8, 0xb86e3175, 1708 0xb87b51ed, 0xb87f41d1, 0xb863721e, 0xb87660f4, 1709 0xce216874, 0xce104533, 0xce648c15, 0xce8e3302, 1710 0xce6e82ab, 0xce6c87d1, 0xcec08063, 0xce638937, 1711 0x25e0c358, 0x25a1c7d3, 0x0580785a, 0x05426328, 1712 0x05009892, 0x25a0cc29, 0x2561cec8, 0x058044b3, 1713 0x05401c99, 0x05006b49, 0x25e0d6f7, 0x2561c528, 1714 0x0583c8bc, 0x0542522f, 0x05001ec0, 0x25e0de65, 1715 0x25a1c113, 0x05803cad, 0x0540f3c0, 0x0500ab15, 1716 0x2560c28c, 0x2561d7c0, 0x05801ed7, 0x0542633b, 1717 0x05003696, 0x2560d4b4, 0x25e1c918, 0x058021ff, 1718 0x05400e15, 0x0500f3de, 0x0473025a, 0x04bd05ab, 1719 0x658e0025, 0x658a08e2, 0x659a0493, 0x043e1062, 1720 0x04f418b4, 0x046d15bd, 0x04611fce, 0x04d6a07c, 1721 0x04001929, 0x041a09da, 0x04d098f4, 0x04db10d4, 1722 0x0459a3ad, 0x041aa029, 0x041919fb, 0x04d39e24, 1723 0x04118302, 0x04101dba, 0x04d7ae16, 0x04dea571, 1724 0x04180210, 0x05e786fc, 0x05e4915c, 0x04881cf1, 1725 0x044a0f04, 0x04090969, 0x048b16c4, 0x044101e4, 1726 0x04dcbf44, 0x65809745, 0x658d833f, 0x65c68468, 1727 0x65c79b07, 0x65829e38, 0x049dafca, 0x6582bba8, 
1728 0x65c0b7ff, 0x65c1b4e0, 0x658dbadd, 0x65819a9d, 1729 0x65ed9246, 0x65b30815, 0x65e6263c, 0x65eebb94, 1730 0x65bad14e, 0x65efe178, 0x65fc5697, 0x65e07f14, 1731 0x040c55a6, 0x04977f4d, 0x043d3046, 0x04b733a0, 1732 0x046830a4, 0x04ed322d, 0x05686948, 0x05bd6c13, 1733 0x65c88ef0, 0x450db3d7, 0x4540b6d9, 0x043e3979, 1734 0x445896ce, 0x445a9005, 0x44d98069, 0x445b87ae, 1735 0x04da348e, 0x04982edb, 0x0499397f, 0x0408338c, 1736 0x04ca309c, 0x65c721e6, 0x65c63641, 0x65982882, 1737 0x04812b8b, 0x0e251083, 0x4e3712d5, 0x0e61101f, 1738 0x4e6d118b, 0x0eba1338, 0x4eb712d5, 0x2e31120f, 1739 0x6e2e11ac, 0x2e6810e6, 0x6e6f11cd, 0x2eaa1128, 1740 0x6eb1120f, 1741 }; 1742 // END Generated code -- do not edit